Dataset schema (column, type, observed size range):

| Column | Type | Range |
|---|---|---|
| repo_name | string | 7-71 chars |
| file_path | string | 5-118 chars |
| context | list | - |
| import_statement | string | 45-12.5k chars |
| token_num | int64 | 641-99.4k |
| cropped_code | string | 44-17k chars |
| all_code | string | 43-754k chars |
| next_line | string | 2-330 chars |
| gold_snippet_index | int64 | 0-68 |
| created_at | string | 25 chars |
| level | string | 9 classes |
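Each row pairs the target file's import block (`import_statement`) and in-file prefix (`cropped_code`) with a list of cross-file `context` snippets; `next_line` is the line that actually follows the prefix in the repository, and `gold_snippet_index` appears to point at the context snippet the gold completion depends on (for example, the ApptainerMethod snippet for the `apptainer: ...` completion in the first row). Below is a minimal sketch of loading and inspecting one row with the Hugging Face `datasets` library; the dataset id used here is a placeholder, not the actual Hub repository.

```python
from datasets import load_dataset

# Placeholder dataset id -- substitute the real Hub repository that hosts these rows.
ds = load_dataset("your-org/repo-next-line-completion", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])

# Cross-file snippet referenced by the gold next line.
gold = row["context"][row["gold_snippet_index"]]
print(gold["identifier"], gold["path"])

# One way to assemble a completion prompt: imports plus the cropped in-file prefix;
# the model is then expected to emit `next_line`.
prompt = row["import_statement"] + "\n" + row["cropped_code"]
print("expected completion:", row["next_line"])
```

The example rows below keep the original viewer layout: each row's column values appear in the schema order above, separated by `|`.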
jkulhanek/nerfbaselines | nerfbaselines/registry.py | [
{
"identifier": "Method",
"path": "nerfbaselines/types.py",
"snippet": "class Method(Protocol):\n @classmethod\n def install(cls):\n \"\"\"\n Install the method.\n \"\"\"\n pass\n\n @abstractmethod\n def get_info(self) -> MethodInfo:\n \"\"\"\n Get method defaults for the trainer.\n\n Returns:\n Method info.\n \"\"\"\n return MethodInfo()\n\n @abstractmethod\n def render(self, cameras: Cameras, progress_callback: Optional[ProgressCallback] = None) -> Iterable[RenderOutput]: # [h w c]\n \"\"\"\n Render images.\n\n Args:\n cameras: Cameras.\n progress_callback: Callback for progress.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def setup_train(self, train_dataset: Dataset, *, num_iterations: int):\n \"\"\"\n Setup training data, model, optimizer, etc.\n\n Args:\n train_dataset: Training dataset.\n num_iterations: Number of iterations to train.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def train_iteration(self, step: int):\n \"\"\"\n Train one iteration.\n\n Args:\n step: Current step.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def save(self, path: Path):\n \"\"\"\n Save model.\n\n Args:\n path: Path to save.\n \"\"\"\n raise NotImplementedError()"
},
{
"identifier": "DockerMethod",
"path": "nerfbaselines/backends/docker.py",
"snippet": "class DockerMethod(RemoteProcessMethod):\n _local_address = \"0.0.0.0\"\n _export_envs = [\"TCNN_CUDA_ARCHITECTURES\", \"TORCH_CUDA_ARCH_LIST\", \"CUDAARCHS\", \"GITHUB_ACTIONS\", \"NB_PORT\", \"NB_PATH\", \"NB_AUTHKEY\", \"NB_ARGS\"]\n _package_path = \"/var/nb-package\"\n _replace_user = True\n image: Optional[str] = None\n mounts: Optional[List[Tuple[str, str]]] = None\n home_path: str = \"/root\"\n environments_path: str = \"/var/nb-prefix/docker-conda-envs\"\n\n def __init__(self, *args, mounts: Optional[List[Tuple[str, str]]] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.mounts = list((mounts or []) + (self.mounts or []))\n assert self.image is not None, \"DockerMethod requires an image\"\n\n @classmethod\n def to_apptainer(cls):\n if cls == DockerMethod:\n return ApptainerMethod\n elif len(cls.__bases__) > 0 and DockerMethod == cls.__bases__[0]:\n bases = tuple(ApptainerMethod if b == DockerMethod else b for b in cls.__bases__)\n\n def build(ns):\n ns[\"__module__\"] = cls.__module__\n ns[\"__doc__\"] = cls.__doc__\n for k, v in cls.__dict__.items():\n ns[k] = v\n if \"__init__\" in ns:\n old_init = ns[\"__init__\"]\n kwargs = getattr(old_init, \"__kwargs__\", {})\n if \"image\" in kwargs:\n kwargs[\"image\"] = \"docker://\" + kwargs[\"image\"]\n ns[\"__init__\"] = partialmethod(ApptainerMethod.__init__, *getattr(old_init, \"__args__\", tuple()), **kwargs)\n ns[\"image\"] = \"docker://\" + ns[\"image\"]\n return ns\n\n return types.new_class(cls.__name__, bases=bases, exec_body=build)\n else:\n raise TypeError(f\"Cannot convert {cls} to ApptainerMethod\")\n\n @property\n def shared_path(self) -> Optional[Tuple[str, str]]:\n if self._tmp_shared_dir is None:\n return None\n return (self._tmp_shared_dir.name, \"/nb-shared\")\n\n @classmethod\n def _get_install_args(cls) -> Optional[List[str]]:\n assert cls.image is not None, \"DockerMethod requires an image\"\n sub_args = super(DockerMethod, cls)._get_install_args() # pylint: disable=assignment-from-none\n if sub_args is None:\n sub_args = [\"true\"]\n os.makedirs(os.path.expanduser(\"~/.conda/pkgs\"), exist_ok=True)\n os.makedirs(os.path.expanduser(\"~/.cache/pip\"), exist_ok=True)\n torch_home = os.path.expanduser(os.environ.get(\"TORCH_HOME\", \"~/.cache/torch/hub\"))\n os.makedirs(torch_home, exist_ok=True)\n os.makedirs(os.path.join(NB_PREFIX, \"docker-conda-envs\"), exist_ok=True)\n uid_gid = \":\".join(list(map(str, (os.getuid(), os.getgid()))))\n use_gpu = True\n if os.getenv(\"GITHUB_ACTIONS\") == \"true\":\n # GitHub Actions does not support GPU\n use_gpu = False\n return [\n \"bash\",\n \"-c\",\n f\"docker pull {shlex.quote(cls.image)} && \"\n + shlex.join(\n [\n \"docker\",\n \"run\",\n *(\n (\n \"--user\",\n uid_gid,\n \"-v=/etc/group:/etc/group:ro\",\n \"-v=/etc/passwd:/etc/passwd:ro\",\n \"-v=/etc/shadow:/etc/shadow:ro\",\n \"--env\",\n f\"HOME={shlex.quote(cls.home_path)}\",\n )\n if cls._replace_user\n else ()\n ),\n *(\n (\n \"--gpus\",\n \"all\",\n )\n if use_gpu\n else ()\n ),\n \"--workdir\",\n os.getcwd(),\n \"-v\",\n shlex.quote(os.getcwd()) + \":\" + shlex.quote(os.getcwd()),\n \"-v\",\n shlex.quote(NB_PREFIX) + \":/var/nb-prefix\",\n \"-v\",\n shlex.quote(PACKAGE_PATH) + \":\" + shlex.quote(cls._package_path),\n \"-v\",\n shlex.quote(os.path.expanduser(\"~/.conda/pkgs\")) + \":/var/nb-conda-pkgs\",\n \"-v\",\n shlex.quote(os.path.expanduser(\"~/.cache/pip\")) + \":/var/nb-pip-cache\",\n \"-v\",\n shlex.quote(torch_home) + \":/var/nb-torch\",\n 
*[f\"-v={shlex.quote(src)}:{shlex.quote(dst)}\" for src, dst in cls.mounts or []],\n \"--env\",\n \"NB_PREFIX=/var/nb-prefix\",\n \"--env\",\n \"CONDA_PKGS_DIRS=/var/nb-conda-pkgs\",\n \"--env\",\n \"PIP_CACHE_DIR=/var/nb-pip-cache\",\n \"--env\",\n \"TORCH_HOME=/var/nb-torch\",\n \"--env\",\n \"NB_USE_GPU=\" + (\"1\" if use_gpu else \"0\"),\n *(sum(([\"--env\", name] for name in cls._export_envs), [])),\n \"--rm\",\n cls.image,\n ]\n + sub_args\n ),\n ]\n\n def _get_server_process_args(self, env, *args, **kwargs):\n python_args = super()._get_server_process_args(env, *args, **kwargs)\n os.makedirs(os.path.expanduser(\"~/.conda/pkgs\"), exist_ok=True)\n torch_home = os.path.expanduser(os.environ.get(\"TORCH_HOME\", \"~/.cache/torch/hub\"))\n os.makedirs(torch_home, exist_ok=True)\n os.makedirs(os.path.join(NB_PREFIX, \"docker-conda-envs\"), exist_ok=True)\n uid_gid = \":\".join(list(map(str, (os.getuid(), os.getgid()))))\n use_gpu = True\n if os.getenv(\"GITHUB_ACTIONS\") == \"true\":\n # GitHub Actions does not support GPU\n use_gpu = False\n return [\n \"docker\",\n \"run\",\n *(\n (\n \"--user\",\n uid_gid,\n \"-v=/etc/group:/etc/group:ro\",\n \"-v=/etc/passwd:/etc/passwd:ro\",\n \"-v=/etc/shadow:/etc/shadow:ro\",\n \"--env\",\n f\"HOME={shlex.quote(self.home_path)}\",\n )\n if self._replace_user\n else ()\n ),\n *(\n (\n \"--gpus\",\n \"all\",\n )\n if use_gpu\n else ()\n ),\n \"--workdir\",\n os.getcwd(),\n \"-v\",\n shlex.quote(os.getcwd()) + \":\" + shlex.quote(os.getcwd()),\n \"-v\",\n shlex.quote(NB_PREFIX) + \":/var/nb-prefix\",\n \"-v\",\n shlex.quote(PACKAGE_PATH) + \":\" + shlex.quote(self._package_path),\n *((\"-v\", shlex.quote(self.shared_path[0]) + \":\" + shlex.quote(self.shared_path[1])) if self.shared_path is not None else []),\n \"-v\",\n shlex.quote(torch_home) + \":/var/nb-torch\",\n *[f\"-v={shlex.quote(src)}:{shlex.quote(dst)}\" for src, dst in self.mounts or []],\n *([f\"-v={shlex.quote(str(self.checkpoint))}:{shlex.quote(str(self.checkpoint))}:ro\"] if self.checkpoint is not None else []),\n \"--env\",\n \"CONDA_PKGS_DIRS=/var/nb-conda-pkgs\",\n \"--env\",\n \"PIP_CACHE_DIR=/var/nb-pip-cache\",\n \"--env\",\n \"TORCH_HOME=/var/nb-torch\",\n \"--env\",\n \"NB_PREFIX=/var/nb-prefix\",\n \"--env\",\n \"NB_USE_GPU=\" + (\"1\" if use_gpu else \"0\"),\n *(sum(([\"--env\", name] for name in self._export_envs), [])),\n \"-p\",\n f\"{self.connection_params.port}:{self.connection_params.port}\",\n \"--rm\",\n (\"-it\" if env.get(\"_NB_IS_DOCKERFILE\") == \"1\" else \"-i\"),\n self.image,\n ] + python_args\n\n @classmethod\n def get_dockerfile(cls):\n sub_args = super(DockerMethod, cls)._get_install_args() # pylint: disable=assignment-from-none\n script = f\"FROM {cls.image}\\n\"\n if sub_args:\n args_safe = []\n for arg in sub_args: # pylint: disable=not-an-iterable\n if \"\\n\" in arg:\n arg = shlex.quote(arg)\n arg = arg.replace(\"\\n\", \" \\\\n\\\\\\n\")\n args_safe.append(f'\"$(echo {arg})\"')\n else:\n args_safe.append(shlex.quote(arg))\n script += \"RUN \" + \" \".join(args_safe) + \"\\n\"\n if cls.python_path != \"python\":\n script += f'RUN ln -s \"$(which {cls.python_path})\" \"/usr/bin/python\"' + \"\\n\"\n env = cls._get_isolated_env()\n env[\"_NB_IS_DOCKERFILE\"] = \"1\"\n entrypoint = super()._get_server_process_args(env)\n script += \"ENTRYPOINT [\" + \", \".join(\"'\" + x.rstrip(\"\\n\") + \"'\" for x in entrypoint) + \"]\\n\"\n return script"
},
{
"identifier": "ApptainerMethod",
"path": "nerfbaselines/backends/apptainer.py",
"snippet": "class ApptainerMethod(RemoteProcessMethod):\n _local_address = \"0.0.0.0\"\n _export_envs = [\"TCNN_CUDA_ARCHITECTURES\", \"TORCH_CUDA_ARCH_LIST\", \"CUDAARCHS\", \"GITHUB_ACTIONS\", \"NB_PORT\", \"NB_PATH\", \"NB_AUTHKEY\", \"NB_ARGS\", \"NB_PREFIX\"]\n _package_path = \"/var/nb-package\"\n image: Optional[str] = None\n mounts: Optional[List[Tuple[str, str]]] = None\n home_path: str = \"/root\"\n environments_path: str = \"/var/nb-prefix/apptainer-conda-envs\"\n\n def __init__(self, *args, mounts: Optional[List[Tuple[str, str]]] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.mounts = list((mounts or []) + (self.mounts or []))\n assert self.image is not None, \"ApptainerMethod requires an image\"\n\n @classmethod\n def _get_isolated_env(cls):\n out = super(ApptainerMethod, cls)._get_isolated_env()\n allowed = {\"APPTAINER_IMAGES\", \"APPTAINER_CACHEDIR\"}\n out.update({k: v for k, v in os.environ.items() if k in allowed})\n return out\n\n @property\n def shared_path(self) -> Optional[Tuple[str, str]]:\n if self._tmp_shared_dir is None:\n return None\n return (self._tmp_shared_dir.name, \"/nb-shared\")\n\n @classmethod\n def _get_install_args(cls) -> Optional[List[str]]:\n sub_args = super(ApptainerMethod, cls)._get_install_args() # pylint: disable=assignment-from-none\n if sub_args is None:\n sub_args = [\"true\"]\n os.makedirs(os.path.join(NB_PREFIX, \"apptainer-conda-envs\"), exist_ok=True)\n conda_cache = os.path.expanduser(os.environ.get(\"CONDA_PKGS_DIRS\", \"~/.conda/pkgs\"))\n os.makedirs(conda_cache, exist_ok=True)\n pip_cache = os.path.expanduser(os.environ.get(\"PIP_CACHE_DIR\", \"~/.cache/pip\"))\n os.makedirs(pip_cache, exist_ok=True)\n torch_home = os.path.expanduser(os.environ.get(\"TORCH_HOME\", \"~/.cache/torch/hub\"))\n os.makedirs(torch_home, exist_ok=True)\n use_gpu = True\n if os.getenv(\"GITHUB_ACTIONS\") == \"true\":\n # GitHub Actions does not support GPU\n use_gpu = False\n return [\n \"apptainer\",\n \"exec\",\n # \"--containall\",\n \"--cleanenv\",\n *((\"--nv\",) if use_gpu else ()),\n \"--bind\",\n \"/tmp:/tmp\",\n \"--writable-tmpfs\",\n \"--no-home\",\n \"-H\",\n cls.home_path,\n \"--workdir\",\n os.getcwd(),\n \"--bind\",\n shlex.quote(os.getcwd()) + \":\" + shlex.quote(os.getcwd()),\n \"--bind\",\n shlex.quote(PACKAGE_PATH) + \":\" + shlex.quote(cls._package_path),\n \"--bind\",\n shlex.quote(NB_PREFIX) + \":/var/nb-prefix\",\n \"--bind\",\n shlex.quote(conda_cache) + \":/var/nb-conda-pkgs\",\n \"--bind\",\n shlex.quote(pip_cache) + \":/var/nb-pip-cache\",\n \"--bind\",\n shlex.quote(torch_home) + \":/var/nb-torch\",\n *[f\"--bind={shlex.quote(src)}:{shlex.quote(dst)}\" for src, dst in cls.mounts or []],\n \"--env\",\n \"NB_USE_GPU=\" + (\"1\" if use_gpu else \"0\"),\n \"--env\",\n \"CONDA_PKGS_DIRS=/var/nb-conda-pkgs\",\n \"--env\",\n \"PIP_CACHE_DIR=/var/nb-pip-cache\",\n \"--env\",\n \"TORCH_HOME=/var/nb-torch\",\n \"--env\",\n \"NB_PREFIX=/var/nb-prefix\",\n \"--env\",\n \"COLUMNS=120\",\n *(sum(([\"--env\", f\"{name}={shlex.quote(os.environ.get(name, ''))}\"] for name in cls._export_envs if name in os.environ), [])),\n cls.image,\n ] + sub_args\n\n def _get_server_process_args(self, env, *args, **kwargs):\n python_args = super()._get_server_process_args(env, *args, **kwargs)\n os.makedirs(os.path.join(NB_PREFIX, \"apptainer-conda-envs\"), exist_ok=True)\n conda_cache = os.path.expanduser(env.get(\"CONDA_PKGS_DIRS\", \"~/.conda/pkgs\"))\n os.makedirs(conda_cache, exist_ok=True)\n pip_cache = 
os.path.expanduser(env.get(\"PIP_CACHE_DIR\", \"~/.cache/pip\"))\n os.makedirs(pip_cache, exist_ok=True)\n torch_home = os.path.expanduser(env.get(\"TORCH_HOME\", \"~/.cache/torch/hub\"))\n os.makedirs(torch_home, exist_ok=True)\n use_gpu = True\n if os.getenv(\"GITHUB_ACTIONS\") == \"true\":\n # GitHub Actions does not support GPU\n use_gpu = False\n return [\n \"apptainer\",\n \"exec\",\n # \"--containall\",\n \"--cleanenv\",\n \"--writable-tmpfs\",\n *((\"--nv\",) if use_gpu else ()),\n \"--bind\",\n \"/tmp:/tmp\",\n \"--writable-tmpfs\",\n \"--no-home\",\n \"-H\",\n self.home_path,\n \"--workdir\",\n os.getcwd(),\n \"--bind\",\n shlex.quote(os.getcwd()) + \":\" + shlex.quote(os.getcwd()),\n \"--bind\",\n shlex.quote(PACKAGE_PATH) + \":\" + shlex.quote(self._package_path),\n *((\"--bind\", shlex.quote(self.shared_path[0]) + \":\" + shlex.quote(self.shared_path[1])) if self.shared_path is not None else []),\n \"--bind\",\n shlex.quote(NB_PREFIX) + \":/var/nb-prefix\",\n \"--bind\",\n shlex.quote(conda_cache) + \":/var/nb-conda-pkgs\",\n \"--bind\",\n shlex.quote(pip_cache) + \":/var/nb-pip-cache\",\n \"--bind\",\n shlex.quote(torch_home) + \":/var/nb-torch\",\n *[f\"--bind={shlex.quote(src)}:{shlex.quote(dst)}\" for src, dst in self.mounts or []],\n *([f\"--bind={shlex.quote(str(self.checkpoint))}:{shlex.quote(str(self.checkpoint))}:ro\"] if self.checkpoint is not None else []),\n *(sum(([\"--env\", f\"{name}={shlex.quote(env.get(name, ''))}\"] for name in self._export_envs if name in env), [])),\n \"--env\",\n \"NB_USE_GPU=\" + (\"1\" if use_gpu else \"0\"),\n \"--env\",\n \"CONDA_PKGS_DIRS=/var/nb-conda-pkgs\",\n \"--env\",\n \"NB_PREFIX=/var/nb-prefix\",\n \"--env\",\n \"PIP_CACHE_DIR=/var/nb-pip-cache\",\n \"--env\",\n \"TORCH_HOME=/var/nb-torch\",\n \"--env\",\n \"COLUMNS=120\",\n self.image,\n ] + python_args"
},
{
"identifier": "CondaMethod",
"path": "nerfbaselines/backends/conda.py",
"snippet": "class CondaMethod(RemoteProcessMethod):\n conda_name: Optional[str] = None\n environment: Optional[str] = None\n python_version: Optional[str] = None\n install_script: Optional[str] = None\n environments_path: str = os.path.join(NB_PREFIX, \"conda-envs\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, python_path=\"python\", **kwargs)\n assert self.conda_name is not None, \"CondaMethod requires conda_name to be specified\"\n\n @classmethod\n def get_environment_hash(cls):\n value = hashlib.sha256()\n if cls.python_version is not None:\n value.update(cls.python_version.encode(\"utf8\"))\n if cls.environment is not None:\n value.update(cls.environment.encode(\"utf8\"))\n if cls.install_script is not None:\n value.update(cls.install_script.encode(\"utf8\"))\n return value.hexdigest()\n\n def _wrap_server_call(self, args):\n return\n\n @classmethod\n def _get_install_args(cls):\n assert cls.conda_name is not None, \"CondaMethod requires conda_name to be specified\"\n environment_hash = cls.get_environment_hash()\n env_root_path = os.path.join(cls.environments_path, cls.conda_name, environment_hash)\n env_path = os.path.join(env_root_path, \".e\", cls.conda_name)\n args = []\n if cls.python_version is not None:\n args.append(f\"python={cls.python_version}\")\n sub_install = \"\"\n sub_install_args = super()._get_install_args() # pylint: disable=assignment-from-none\n if sub_install_args:\n sub_install = shlex.join(sub_install_args)\n script = f\"\"\"set -eo pipefail\n# Clear old environments\nif [ -d {shlex.quote(os.path.dirname(env_root_path))} ]; then\n for hash in $(ls -1 {shlex.quote(os.path.dirname(env_root_path))}); do\n if [ \"$hash\" != {shlex.quote(environment_hash)} ]; then\n rm -rf {shlex.quote(os.path.dirname(env_root_path))}\"/$hash\"\n fi\n done\nfi\n# Create new environment\neval \"$(conda shell.bash hook)\"\nif [ ! -e {shlex.quote(os.path.join(env_root_path, \".ack.txt\"))} ]; then\nrm -rf {shlex.quote(env_root_path)}\nmkdir -p {shlex.quote(os.path.dirname(env_path))}\n{shlex.join([\"conda\", \"create\", \"--prefix\", env_path, \"-y\"] + args)}\nconda activate {shlex.quote(env_path)}\ncd {shlex.quote(env_root_path)}\n{cls.install_script}\ntouch {shlex.quote(os.path.join(env_root_path, \".ack.txt\"))}\necho \"0\" > {shlex.quote(os.path.join(env_root_path, \".ack.txt\"))}\nfi\n{sub_install}\n\"\"\"\n return [\"bash\", \"-c\", script]\n\n def _get_server_process_args(self, env, *args, **kwargs):\n assert self.conda_name is not None, \"CondaMethod requires conda_name to be specified\"\n return [\n \"bash\",\n \"-c\",\n f\"\"\"eval \"$(conda shell.bash hook)\" && \\\nconda activate {os.path.join(self.environments_path, self.conda_name, self.get_environment_hash(), \".e\", self.conda_name)} && \\\nexec {shlex.join(super()._get_server_process_args(env, *args, **kwargs))}\n\"\"\",\n ]"
},
{
"identifier": "partialclass",
"path": "nerfbaselines/utils.py",
"snippet": "def partialclass(cls, *args, **kwargs):\n def build(ns):\n cls_dict = cls.__dict__\n ns[\"__module__\"] = cls_dict[\"__module__\"]\n ns[\"__doc__\"] = cls_dict[\"__doc__\"]\n if args or kwargs:\n ns[\"__init__\"] = partialmethod(cls.__init__, *args, **kwargs)\n return ns\n\n return types.new_class(cls.__name__, bases=(cls,), exec_body=build)"
}
] | import inspect
import types
import typing
import os
import importlib
import dataclasses
import subprocess
from typing import Optional, Type, Any, Tuple, Dict, Set
from typing import Literal
from typing_extensions import Literal # type: ignore
from typing import FrozenSet
from typing_extensions import FrozenSet # type: ignore
from typing import get_args
from typing_extensions import get_args # type: ignore
from dataclasses import dataclass, field
from .types import Method
from .backends import DockerMethod, CondaMethod, ApptainerMethod
from .utils import partialclass
from . import methods
from . import methods | 6,114 |
try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal  # type: ignore
try:
    from typing import FrozenSet
except ImportError:
    from typing_extensions import FrozenSet  # type: ignore
try:
    from typing import get_args
except ImportError:
    from typing_extensions import get_args  # type: ignore
DEFAULT_DOCKER_IMAGE = "kulhanek/nerfbaselines:v1"
Backend = Literal["conda", "docker", "apptainer", "python"]
ALL_BACKENDS = list(get_args(Backend))
registry = {}
# Auto register
_auto_register_completed = False
def auto_register(force=False):
global _auto_register_completed
if _auto_register_completed and not force:
return
# TODO: do this more robustly
for package in os.listdir(os.path.dirname(methods.__file__)):
if package.endswith(".py") and not package.startswith("_") and package != "__init__.py":
package = package[:-3]
importlib.import_module(f".methods.{package}", __package__)
_auto_register_completed = True
def register(spec: "MethodSpec", name: str, *args, metadata=None, **kwargs) -> "MethodSpec":
assert name not in registry, f"Method {name} already registered"
if metadata is None:
metadata = {}
metadata = {**spec.metadata, **metadata}
spec = dataclasses.replace(spec, args=spec.args + args, kwargs={**spec.kwargs, **kwargs}, metadata=metadata)
registry[name] = spec
return spec
class _LazyMethodMeta(type):
def __getitem__(cls, __name: Tuple[str, str]) -> Type[Method]:
module, name = __name
module_base = methods.__package__
def build(ns):
def new(ncls, *args, **kwargs):
old_init = ncls.__init__
# For partialclass
if hasattr(old_init, "__original_func__"):
args = old_init.__args__ + args
kwargs = {**old_init.__kwargs__, **kwargs}
mod = importlib.import_module(module, methods.__package__)
ncls = getattr(mod, name)
assert inspect.isclass(ncls)
return ncls(*args, **kwargs)
ns["__new__"] = new
ncls = types.new_class(name, exec_body=build, bases=(Method,))
ncls.__module__ = module_base + module if module.startswith(".") else module
ncls.__name__ = name
return typing.cast(Type[Method], ncls)
class LazyMethod(object, metaclass=_LazyMethodMeta):
def __class_getitem__(cls, __name: Tuple[str, str]) -> Type[Method]:
return _LazyMethodMeta.__getitem__(cls, __name)
@dataclass(frozen=True)
class MethodSpec:
method: Type[Method]
conda: Optional[Type[CondaMethod]] = None
docker: Optional[Type[DockerMethod]] = None
|
try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal  # type: ignore
try:
    from typing import FrozenSet
except ImportError:
    from typing_extensions import FrozenSet  # type: ignore
try:
    from typing import get_args
except ImportError:
    from typing_extensions import get_args  # type: ignore
DEFAULT_DOCKER_IMAGE = "kulhanek/nerfbaselines:v1"
Backend = Literal["conda", "docker", "apptainer", "python"]
ALL_BACKENDS = list(get_args(Backend))
registry = {}
# Auto register
_auto_register_completed = False
def auto_register(force=False):
global _auto_register_completed
if _auto_register_completed and not force:
return
# TODO: do this more robustly
for package in os.listdir(os.path.dirname(methods.__file__)):
if package.endswith(".py") and not package.startswith("_") and package != "__init__.py":
package = package[:-3]
importlib.import_module(f".methods.{package}", __package__)
_auto_register_completed = True
def register(spec: "MethodSpec", name: str, *args, metadata=None, **kwargs) -> "MethodSpec":
assert name not in registry, f"Method {name} already registered"
if metadata is None:
metadata = {}
metadata = {**spec.metadata, **metadata}
spec = dataclasses.replace(spec, args=spec.args + args, kwargs={**spec.kwargs, **kwargs}, metadata=metadata)
registry[name] = spec
return spec
class _LazyMethodMeta(type):
def __getitem__(cls, __name: Tuple[str, str]) -> Type[Method]:
module, name = __name
module_base = methods.__package__
def build(ns):
def new(ncls, *args, **kwargs):
old_init = ncls.__init__
# For partialclass
if hasattr(old_init, "__original_func__"):
args = old_init.__args__ + args
kwargs = {**old_init.__kwargs__, **kwargs}
mod = importlib.import_module(module, methods.__package__)
ncls = getattr(mod, name)
assert inspect.isclass(ncls)
return ncls(*args, **kwargs)
ns["__new__"] = new
ncls = types.new_class(name, exec_body=build, bases=(Method,))
ncls.__module__ = module_base + module if module.startswith(".") else module
ncls.__name__ = name
return typing.cast(Type[Method], ncls)
class LazyMethod(object, metaclass=_LazyMethodMeta):
def __class_getitem__(cls, __name: Tuple[str, str]) -> Type[Method]:
return _LazyMethodMeta.__getitem__(cls, __name)
@dataclass(frozen=True)
class MethodSpec:
method: Type[Method]
conda: Optional[Type[CondaMethod]] = None
docker: Optional[Type[DockerMethod]] = None | apptainer: Optional[Type[ApptainerMethod]] = None | 2 | 2023-11-07 20:22:35+00:00 | 8k |
microsoft/Everything-of-Thoughts-XoT | xot_all_in_one/xot/controller/solver/xot_solver.py | [
{
"identifier": "MCTS",
"path": "xot_all_in_one/xot/controller/solver/MCTS.py",
"snippet": "class MCTS():\n \"\"\"\n This class handles the MCTS tree.\n \"\"\"\n\n def __init__(self, game, nnet, args, player=1):\n self.game = game\n self.player = player\n self.nnet = nnet\n self.args = args\n self.Qsa = {} # stores Q values for s,a (as defined in the paper)\n self.Nsa = {} # stores #times edge s,a was visited\n self.Ns = {} # stores #times board s was visited\n self.Ps = {} # stores initial policy (returned by neural net)\n\n self.Es = {} # stores game.getGameEnded ended for board s\n self.Vs = {} # stores game.getValidMoves for board s\n self.modelCall = 0\n\n def getActionProb(self, canonicalBoard, temp=1, step=0):\n \"\"\"\n This function performs numMCTSSims simulations of MCTS starting from\n canonicalBoard.\n\n Returns:\n probs: a policy vector where the probability of the ith action is\n proportional to Nsa[(s,a)]**(1./temp)\n \"\"\"\n for i in range(self.args.numMCTSSims):\n if self.player == 2:\n self.search(canonicalBoard)\n elif self.player == 1:\n self.searchSinglePlayer(canonicalBoard, step=step)\n\n \n s = self.game.stringRepresentation(canonicalBoard)\n counts = [self.Nsa[(s, a)] if (s, a) in self.Nsa else 0 for a in range(self.game.getActionSize())]\n\n if temp == 0:\n bestAs = np.array(np.argwhere(counts == np.max(counts))).flatten()\n bestA = np.random.choice(bestAs)\n probs = [0] * len(counts)\n probs[bestA] = 1\n return probs\n \n counts = [x ** (1. / temp) for x in counts]\n counts_sum = float(sum(counts))\n probs = [x / counts_sum for x in counts]\n return probs\n\n def searchSinglePlayer(self, canonicalBoard, step=0):\n \"\"\"\n This function performs one iteration of MCTS. It is recursively called\n till a leaf node is found. The action chosen at each node is one that\n has the maximum upper confidence bound as in the paper.\n\n Once a leaf node is found, the neural network is called to return an\n initial policy P and a value v for the state. This value is propagated\n up the search path. In case the leaf node is a terminal state, the\n outcome is propagated up the search path. The values of Ns, Nsa, Qsa are\n updated.\n\n NOTE: the return values are the negative of the value of the current\n state. This is done since v is in [-1,1] and if v is the value of a\n state for the current player, then its value is -v for the other player.\n\n Returns:\n v: the negative of the value of the current canonicalBoard\n \"\"\"\n \n s = self.game.stringRepresentation(canonicalBoard)\n\n terminate = self.game.isTerminate(canonicalBoard, step)\n\n if s not in self.Es:\n self.Es[s] = self.game.getGameEnded(canonicalBoard)\n if terminate:\n # terminal node\n return self.Es[s]\n\n if s not in self.Ps:\n # leaf node\n self.Ps[s], v = self.nnet.predict(canonicalBoard)\n self.modelCall += 1\n valids = self.game.getValidMoves(canonicalBoard)\n self.Ps[s] = self.Ps[s] * valids # masking invalid moves\n sum_Ps_s = np.sum(self.Ps[s])\n if sum_Ps_s > 0:\n self.Ps[s] /= sum_Ps_s # renormalize\n else:\n # if all valid moves were masked make all valid moves equally probable\n\n # NB! All valid moves may be masked if either your NNet architecture is insufficient or you've get overfitting or something else.\n # If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process. 
\n log.error(\"All valid moves were masked, doing a workaround.\")\n self.Ps[s] = self.Ps[s] + valids\n self.Ps[s] /= np.sum(self.Ps[s])\n\n self.Vs[s] = valids\n self.Ns[s] = 0\n return v\n\n valids = self.Vs[s]\n cur_best = -float('inf')\n best_act = -1\n\n # pick the action with the highest upper confidence bound\n for a in range(self.game.getActionSize()):\n if valids[a]:\n if (s, a) in self.Qsa:\n u = self.Qsa[(s, a)] + self.args.model.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s]) / (\n 1 + self.Nsa[(s, a)])\n else:\n u = self.args.model.cpuct * self.Ps[s][a] * math.sqrt(self.Ns[s] + EPS) # Q = 0 ?\n if u > cur_best:\n cur_best = u\n best_act = a\n\n a = best_act\n next_s, _ = self.game.getNextState(canonicalBoard, a)\n\n v = self.searchSinglePlayer(next_s, step+1)\n\n if (s, a) in self.Qsa:\n self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)\n self.Nsa[(s, a)] += 1\n\n else:\n self.Qsa[(s, a)] = v\n self.Nsa[(s, a)] = 1\n\n self.Ns[s] += 1\n return v\n\n\n def inferSinglePlayer(self, canonicalBoard, step=0, seed=42):\n \"\"\"\n This function performs one iteration of MCTS. It is recursively called\n till a leaf node is found. The action chosen at each node is one that\n has the maximum upper confidence bound as in the paper.\n\n Once a leaf node is found, the neural network is called to return an\n initial policy P and a value v for the state. This value is propagated\n up the search path. In case the leaf node is a terminal state, the\n outcome is propagated up the search path. The values of Ns, Nsa, Qsa are\n updated.\n\n NOTE: the return values are the negative of the value of the current\n state. This is done since v is in [-1,1] and if v is the value of a\n state for the current player, then its value is -v for the other player.\n\n Returns:\n v: the negative of the value of the current canonicalBoard\n \"\"\"\n np.random.seed(seed)\n state = canonicalBoard\n # print('canonicalBoard', state)\n selected_ac_seq = []\n for i in range(self.game.total_game_step):\n # print('state', state)\n terminate = self.game.isTerminate(state, i)\n if terminate:\n break\n\n s = self.game.stringRepresentation(state)\n valids = self.game.getValidMoves(state)\n # print('state', s)\n # print('valids', valids)\n # print('self.Qsa', self.Qsa)\n ac_candidates = [action for (state, action) in self.Qsa.keys() if state == s]\n # print(ac_candidates)\n # input()\n # pick the action with the highest upper confidence bound\n counts = []\n \n # for a in ac_candidates:\n for a in range(self.game.getActionSize()):\n try:\n c_ = self.Nsa[(s, a)] \n counts.append(c_)\n except:\n counts.append(0)\n\n counts_sum = float(sum(counts))\n if counts_sum == 0:\n probs, _ = self.nnet.predict(state)\n counts = probs.tolist()\n counts_sum = float(sum(counts))\n probs = [x / counts_sum for x in counts]\n \n else:\n probs = [x / counts_sum for x in counts]\n \n \n # print('probs', probs)\n valid_moves = self.game.getValidMoves(state)\n # print('valid_moves', valid_moves)\n masked_prob = valid_moves * probs\n counts_sum_masked = float(sum(masked_prob))\n probs = [x / counts_sum_masked for x in masked_prob]\n # print('masked_probs', probs)\n # input()\n selected_ac = np.random.choice(len(probs), p=probs)\n # print('selected_ac', selected_ac)\n state, action_in_text= self.game.getNextState(state, selected_ac)\n # print(state, action_in_text)\n selected_ac_seq.append(action_in_text)\n\n res = self.game.getGameEnded(state)\n # print('selected_ac_seq', selected_ac_seq)\n # input()\n return 
selected_ac_seq, res\n\n \n \n def getModelCall(self):\n return self.modelCall\n \n def reset(self):\n self.Qsa = {} # stores Q values for s,a (as defined in the paper)\n self.Nsa = {} # stores #times edge s,a was visited\n self.Ns = {} # stores #times board s was visited\n self.Ps = {} # stores initial policy (returned by neural net)\n self.Es = {} # stores game.getGameEnded ended for board s\n self.Vs = {} # stores game.getValidMoves for board s\n # self.modelCall = 0"
},
{
"identifier": "Coach",
"path": "xot_all_in_one/xot/controller/solver/Coach.py",
"snippet": "class Coach():\n \"\"\"\n This class executes the self-play + learning. It uses the functions defined\n in Game and NeuralNet. args are specified in main.py.\n \"\"\"\n\n def __init__(self, game, nnet, args, player=2):\n self.game = game\n self.nnet = nnet\n self.pnet = self.nnet.__class__(self.game) # the competitor network\n self.args = args\n self.player = player\n self.mcts = MCTS(self.game, self.nnet, self.args, self.player)\n self.trainExamplesHistory = [] # history of examples from args.numItersForTrainExamplesHistory latest iterations\n self.skipFirstSelfPlay = False # can be overriden in loadTrainExamples()\n \n\n def executeEpisode(self):\n \"\"\"\n This function executes one episode of self-play, starting with player 1.\n As the game is played, each turn is added as a training example to\n trainExamples. The game is played till the game ends. After the game\n ends, the outcome of the game is used to assign values to each example\n in trainExamples.\n\n It uses a temp=1 if episodeStep < tempThreshold, and thereafter\n uses temp=0.\n\n Returns:\n trainExamples: a list of examples of the form (canonicalBoard, currPlayer, pi,v)\n pi is the MCTS informed policy vector, v is +1 if\n the player eventually won the game, else -1.\n \"\"\"\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n rewards = [0]\n\n while True:\n \n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer) if self.player == 2 else board\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp, step=episodeStep)\n\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n if self.player == 2:\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n r = self.game.getGameEnded(board, self.curPlayer)\n else:\n board, self.curPlayer = self.game.getNextState(board, action)\n r = self.game.getGameEnded(board)\n\n rewards.append(r)\n\n episodeStep += 1\n terminate = self.game.isTerminate(board, episodeStep)\n if terminate:\n sym = self.game.getSymmetries(board, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n # if r == 1:\n # for i, x in enumerate(trainExamples):\n # _, v = self.nnet.predict(x[0])\n # print(x[0], sum(rewards[i:]), v)\n return [(x[0], x[2], sum(rewards[i:])) for i, x in enumerate(trainExamples)]\n\n\n def learn(self):\n \"\"\"\n Performs numIters iterations with numEps episodes of self-play in each\n iteration. 
After every iteration, it retrains neural network with\n examples in trainExamples (which has a maximum length of maxlenofQueue).\n It then pits the new neural network against the old one and accepts it\n only if it wins >= updateThreshold fraction of games.\n \"\"\"\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n logging.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args, self.player) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n logging.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint + self.args.env + '/', filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint + self.args.env + '/', filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args, self.player)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args, self.player)\n\n logging.info('PITTING AGAINST PREVIOUS VERSION')\n \n pmcts_modelcall_before = pmcts.getModelCall()\n nmcts_modelcall_before = nmcts.getModelCall()\n \n arena = ArenaSingle(pmcts, nmcts, self.game, self.args.winReward)\n pwins, nwins = arena.playGames(self.args.arenaCompare, verbose=True)\n pmcts_modelcall_after = pmcts.getModelCall()\n nmcts_modelcall_after = nmcts.getModelCall()\n\n pmcts_modelcall_avg = round((pmcts_modelcall_after - pmcts_modelcall_before) / self.args.arenaCompare, 2)\n nmcts_modelcall_avg = round((nmcts_modelcall_after - nmcts_modelcall_before) / self.args.arenaCompare, 2)\n\n logging.info('NEW/PREV WINS : %d / %d, NEW/PREV AVG CALL : %s / %s, ' % (nwins, pwins, nmcts_modelcall_avg, pmcts_modelcall_avg))\n\n \n if pwins + nwins == 0 or float(nwins - pwins) / self.args.arenaCompare < self.args.updateThreshold:\n logging.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint + self.args.env + '/', filename='temp.pth.tar')\n else:\n logging.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint + self.args.env + '/', filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint + self.args.env + '/', filename='best.pth.tar')\n\n\n def infer(self):\n \"\"\"\n Load model and generate thoughts.\n \"\"\"\n \n # training new network, keeping a copy of the old one\n self.pnet.load_checkpoint(folder=self.args.checkpoint + self.args.env + '/', filename='best.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args, self.player)\n\n logging.info('TESTING BEGAIN:')\n \n pmcts_modelcall_before = pmcts.getModelCall()\n \n arena = ArenaTest(pmcts, self.game, self.args.winReward)\n pwins, thoughts_record = 
arena.playGames(self.args.arenaCompare, verbose=True)\n pmcts_modelcall_after = pmcts.getModelCall()\n\n pmcts_modelcall_avg = round((pmcts_modelcall_after - pmcts_modelcall_before) / self.args.arenaCompare, 2)\n thoughts_acc = round(pwins/self.game.test_size, 4) * 100\n\n logging.info('TESTING WINS : %d / %d, THOUGHTS ACC : %d %%, TESTING AVG CALL : %s' % (pwins, self.game.test_size, thoughts_acc, pmcts_modelcall_avg))\n pd_thoughts = pd.DataFrame(data=thoughts_record, columns=['problem_state', 'thoughts', 'acc'])\n pd_thoughts.to_csv('./logs/%s_thoughts.csv'%self.args.env)\n \n def generate_thoughts(self, board, player, early_stop=1000):\n problem_state = board\n step = 0\n actions, action_in_text_list = [], []\n # print('self.game.total_game_step', self.game.total_game_step)\n # input()\n while not self.game.isTerminate(board, step):\n action = player(board)\n valids = self.game.getValidMoves(board)\n board, action_in_text = self.game.getNextState(board, action)\n actions.append(action)\n action_in_text_list.append(action_in_text)\n step += 1\n \n return problem_state, self.game.getGameEnded(board), actions, action_in_text_list\n\n def getCheckpointFile(self, iteration):\n return 'checkpoint_' + str(iteration) + '.pth.tar'\n\n def saveTrainExamples(self, iteration):\n folder = self.args.checkpoint + self.args.env + '/'\n if not os.path.exists(folder):\n os.makedirs(folder)\n filename = os.path.join(folder, self.getCheckpointFile(iteration) + \".examples\")\n with open(filename, \"wb+\") as f:\n Pickler(f).dump(self.trainExamplesHistory)\n f.closed\n\n def loadTrainExamples(self):\n modelFile = os.path.join(self.args.load_folder_file[0], self.args.load_folder_file[1])\n examplesFile = modelFile + \".examples\"\n if not os.path.isfile(examplesFile):\n logging.warning(f'File \"{examplesFile}\" with trainExamples not found!')\n r = input(\"Continue? [y|n]\")\n if r != \"y\":\n sys.exit()\n else:\n logging.info(\"File with trainExamples found. Loading it...\")\n with open(examplesFile, \"rb\") as f:\n self.trainExamplesHistory = Unpickler(f).load()\n logging.info('Loading done!')\n\n # examples based on the model were already collected (loaded)\n self.skipFirstSelfPlay = True"
}
] | import os
import json
import itertools
import random
import ast
import re
import numpy as np
import pandas as pd
from collections import Counter
from .MCTS import MCTS
from .Coach import Coach
from .pytorch_game24.NNet import NNetWrapper as nn
from .pytorch_cube.NNet import NNetWrapper as nn
from .pytorch_npuzzle.NNet import NNetWrapper as nn | 5,334 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
class XoT_Solver:
"""
The XoT_Solver class is designed to solve a variety of games using a combination of Monte Carlo Tree Search (MCTS),
Neural Networks (NN), and a coaching mechanism. It supports both single and multiple solutions, and can revise its
solutions based on feedback.
Attributes:
args: A configuration object containing various parameters.
gpt: An instance of a GPT model for generating prompts.
game: An instance of the game to be solved.
prompter: An instance of a class for generating prompts.
parser: An instance of a class for parsing actions and thoughts.
nmcts: An instance of MCTS.
c: An instance of a Coach.
to_print: A boolean indicating whether to print debug information.
"""
def __init__(self, args, gpt, game, prompter, parser, to_print=False):
"""
Initializes the XoT_Solver with the given arguments, GPT model, game, prompter, parser, and print option.
"""
self.args = args
self.gpt = gpt
self.game = game
self.prompter = prompter
self.parser = parser
self.nmcts, self.c = self.initial_xot(args)
self.to_print = to_print
def initial_xot(self, args):
"""
Initializes the Neural Network and MCTS based on the game environment specified in the arguments.
"""
if args.env.lower() == 'game24':
    from .pytorch_game24.NNet import NNetWrapper as nn
elif args.env.lower() == 'cube':
    from .pytorch_cube.NNet import NNetWrapper as nn
elif args.env.lower() == 'npuzzle':
    from .pytorch_npuzzle.NNet import NNetWrapper as nn
else:
    raise ValueError
nnet = nn(self.game)
nnet.load_checkpoint(folder=self.args.model.checkpoint, filename=self.args.model.filename)
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
class XoT_Solver:
"""
The XoT_Solver class is designed to solve a variety of games using a combination of Monte Carlo Tree Search (MCTS),
Neural Networks (NN), and a coaching mechanism. It supports both single and multiple solutions, and can revise its
solutions based on feedback.
Attributes:
args: A configuration object containing various parameters.
gpt: An instance of a GPT model for generating prompts.
game: An instance of the game to be solved.
prompter: An instance of a class for generating prompts.
parser: An instance of a class for parsing actions and thoughts.
nmcts: An instance of MCTS.
c: An instance of a Coach.
to_print: A boolean indicating whether to print debug information.
"""
def __init__(self, args, gpt, game, prompter, parser, to_print=False):
"""
Initializes the XoT_Solver with the given arguments, GPT model, game, prompter, parser, and print option.
"""
self.args = args
self.gpt = gpt
self.game = game
self.prompter = prompter
self.parser = parser
self.nmcts, self.c = self.initial_xot(args)
self.to_print = to_print
def initial_xot(self, args):
"""
Initializes the Neural Network and MCTS based on the game environment specified in the arguments.
"""
if args.env.lower() == 'game24':
    from .pytorch_game24.NNet import NNetWrapper as nn
elif args.env.lower() == 'cube':
    from .pytorch_cube.NNet import NNetWrapper as nn
elif args.env.lower() == 'npuzzle':
    from .pytorch_npuzzle.NNet import NNetWrapper as nn
else:
    raise ValueError
nnet = nn(self.game)
nnet.load_checkpoint(folder=self.args.model.checkpoint, filename=self.args.model.filename) | nmcts = MCTS(self.game, nnet, args) | 0 | 2023-11-08 09:48:34+00:00 | 8k |
ultraleap/leapc-python-bindings | leapc-python-api/src/leap/connection.py | [
{
"identifier": "Device",
"path": "leapc-python-api/src/leap/device.py",
"snippet": "class Device:\n def __init__(self, device_ref=None, *, device=None, owner=None):\n \"\"\"A Device is usually constructed from a LEAP_DEVICE_REF object.\n\n Some functions require the device to be opened before they can be\n called.\n\n If a DeviceLost event occurs, this can be created from a LEAP_DEVICE\n object. In this case the Device is already open and does not need to\n be closed by the user.\n\n The 'owner' argument is a CFFI object that must be kept alive\n for the device ref to remain valid. It should never be used from\n within the class.\n \"\"\"\n self._device_ref = device_ref\n self._device = device\n self._owner = owner\n\n @property\n def c_data_device_ref(self):\n \"\"\"Get the LEAP_DEVICE_REF object for this object\"\"\"\n return self._device_ref\n\n @property\n def c_data_device(self):\n \"\"\"Get the LEAP_DEVICE object for this object\n\n If the device is not open, returns None\n \"\"\"\n return self._device\n\n @property\n def id(self):\n if self._device_ref is None:\n # The device must have been returned from a DeviceLostEvent\n # This means it does not have an id, so return None\n return\n return self._device_ref.id\n\n @contextmanager\n def open(self):\n if self._device is not None:\n raise LeapCannotOpenDeviceError(\"Device is already open\")\n\n device_ptr = ffi.new(\"LEAP_DEVICE*\")\n success_or_raise(libleapc.LeapOpenDevice, self._device_ref, device_ptr)\n self._device = device_ptr[0]\n try:\n yield self\n finally:\n self._device = None\n libleapc.LeapCloseDevice(device_ptr[0])\n\n def get_info(self):\n \"\"\"Get a DeviceInfo object containing information about this device\n\n Requires the Device to be open.\n Raises DeviceNotOpenException if the device is not open.\n \"\"\"\n if self._device is None:\n raise DeviceNotOpenException()\n info_ptr = ffi.new(\"LEAP_DEVICE_INFO*\")\n info_ptr.size = ffi.sizeof(info_ptr[0])\n info_ptr.serial = ffi.NULL\n success_or_raise(libleapc.LeapGetDeviceInfo, self._device, info_ptr)\n info_ptr.serial = ffi.new(\"char[]\", info_ptr.serial_length)\n success_or_raise(libleapc.LeapGetDeviceInfo, self._device, info_ptr)\n return DeviceInfo(info_ptr[0])\n\n def get_camera_count(self):\n if not self._device:\n raise DeviceNotOpenException()\n camera_count_ptr = ffi.new(\"uint8_t *\")\n success_or_raise(libleapc.LeapGetDeviceCameraCount, self._device, camera_count_ptr)\n return camera_count_ptr[0]"
},
{
"identifier": "ConnectionStatus",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "class ConnectionStatus(metaclass=LeapEnum):\n pass"
},
{
"identifier": "EventType",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "class EventType(metaclass=LeapEnum):\n pass"
},
{
"identifier": "RS",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "class RS(metaclass=LeapEnum):\n pass"
},
{
"identifier": "ConnectionConfig",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "class ConnectionConfig(metaclass=LeapEnum):\n pass"
},
{
"identifier": "TrackingMode",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "class TrackingMode(metaclass=LeapEnum):\n pass"
},
{
"identifier": "PolicyFlag",
"path": "leapc-python-api/src/leap/enums.py",
"snippet": "class PolicyFlag(metaclass=LeapEnum):\n pass"
},
{
"identifier": "LatestEventListener",
"path": "leapc-python-api/src/leap/event_listener.py",
"snippet": "class LatestEventListener(Listener):\n def __init__(self, target: EventType):\n self._target = target\n self.event: Optional[Event] = None\n\n def on_event(self, event: Event):\n if event.type == self._target:\n self.event = event"
},
{
"identifier": "Listener",
"path": "leapc-python-api/src/leap/event_listener.py",
"snippet": "class Listener:\n \"\"\"Base class for custom Listeners to Connections\n\n This should be subclassed and methods overridden to handle events and errors.\n \"\"\"\n\n def on_event(self, event: Event):\n \"\"\"Called every event\n\n Note that if this method is overridden, the more specific event functions will not be called\n unless the overridden method calls this method.\n \"\"\"\n getattr(self, self._EVENT_CALLS[event.type])(event)\n\n def on_error(self, error: LeapError):\n \"\"\"If an error occurs in polling, the Exception is passed to this function\"\"\"\n pass\n\n def on_none_event(self, event: Event):\n pass\n\n def on_connection_event(self, event: Event):\n pass\n\n def on_connection_lost_event(self, event: Event):\n pass\n\n def on_device_event(self, event: Event):\n pass\n\n def on_device_failure_event(self, event: Event):\n pass\n\n def on_policy_event(self, event: Event):\n pass\n\n def on_tracking_event(self, event: Event):\n pass\n\n def on_image_request_error_event(self, event: Event):\n pass\n\n def on_image_complete_event(self, event: Event):\n pass\n\n def on_log_event(self, event: Event):\n pass\n\n def on_device_lost_event(self, event: Event):\n pass\n\n def on_config_response_event(self, event: Event):\n pass\n\n def on_config_change_event(self, event: Event):\n pass\n\n def on_device_status_change_event(self, event: Event):\n pass\n\n def on_dropped_frame_event(self, event: Event):\n pass\n\n def on_image_event(self, event: Event):\n pass\n\n def on_point_mapping_change_event(self, event: Event):\n pass\n\n def on_tracking_mode_event(self, event: Event):\n pass\n\n def on_log_events(self, event: Event):\n pass\n\n def on_head_pose_event(self, event: Event):\n pass\n\n def on_eyes_event(self, event: Event):\n pass\n\n def on_imu_event(self, event: Event):\n pass\n\n _EVENT_CALLS = {\n EventType.EventTypeNone: \"on_none_event\",\n EventType.Connection: \"on_connection_event\",\n EventType.ConnectionLost: \"on_connection_lost_event\",\n EventType.Device: \"on_device_event\",\n EventType.DeviceFailure: \"on_device_failure_event\",\n EventType.Policy: \"on_policy_event\",\n EventType.Tracking: \"on_tracking_event\",\n EventType.ImageRequestError: \"on_image_request_error_event\",\n EventType.ImageComplete: \"on_image_complete_event\",\n EventType.LogEvent: \"on_log_event\",\n EventType.DeviceLost: \"on_device_lost_event\",\n EventType.ConfigResponse: \"on_config_response_event\",\n EventType.ConfigChange: \"on_config_change_event\",\n EventType.DeviceStatusChange: \"on_device_status_change_event\",\n EventType.DroppedFrame: \"on_dropped_frame_event\",\n EventType.Image: \"on_image_event\",\n EventType.PointMappingChange: \"on_point_mapping_change_event\",\n EventType.TrackingMode: \"on_tracking_mode_event\",\n EventType.LogEvents: \"on_log_events\",\n EventType.HeadPose: \"on_head_pose_event\",\n EventType.Eyes: \"on_eyes_event\",\n EventType.IMU: \"on_imu_event\",\n }"
},
{
"identifier": "create_event",
"path": "leapc-python-api/src/leap/events.py",
"snippet": "def create_event(data):\n \"\"\"Create an Event from `LEAP_CONNECTION_MESSAGE*` cdata\"\"\"\n events = {\n EventType.EventTypeNone: NoneEvent,\n EventType.Connection: ConnectionEvent,\n EventType.ConnectionLost: ConnectionLostEvent,\n EventType.Device: DeviceEvent,\n EventType.DeviceFailure: DeviceFailureEvent,\n EventType.Policy: PolicyEvent,\n EventType.Tracking: TrackingEvent,\n EventType.ImageRequestError: ImageRequestErrorEvent,\n EventType.ImageComplete: ImageCompleteEvent,\n EventType.LogEvent: LogEvent,\n EventType.DeviceLost: DeviceLostEvent,\n EventType.ConfigResponse: ConfigResponseEvent,\n EventType.ConfigChange: ConfigChangeEvent,\n EventType.DeviceStatusChange: DeviceStatusChangeEvent,\n EventType.DroppedFrame: DroppedFrameEvent,\n EventType.Image: ImageEvent,\n EventType.PointMappingChange: PointMappingChangeEvent,\n EventType.TrackingMode: TrackingModeEvent,\n EventType.LogEvents: LogEvents,\n EventType.HeadPose: HeadPoseEvent,\n EventType.Eyes: EyesEvent,\n EventType.IMU: IMUEvent,\n }\n return events[EventType(data.type)].from_connection_message(data)"
},
{
"identifier": "Event",
"path": "leapc-python-api/src/leap/events.py",
"snippet": "class Event(LeapCStruct):\n \"\"\"Base class for Events\n\n Events have extra 'type' and 'metadata' properties.\n\n If the Event is constructed using the default constructor, the metadata is not populated.\n\n If the event is constructed using a `LEAP_CONNECTION_MESSAGE*` via the\n `from_connection_message` method, extra metadata will be available on\n the event.\n \"\"\"\n\n # The type of event this class corresponds to\n _EVENT_TYPE = EventType.EventTypeNone\n # The member on the `LEAP_CONNECTION_MESSAGE` that corresponds to the\n # event data.\n _EVENT_MESSAGE_ATTRIBUTE = \"pointer\"\n\n def __init__(self, data):\n super().__init__(data)\n self._metadata = None\n\n @classmethod\n def from_connection_message(cls, c_message):\n \"\"\"Construct an Event from a LEAP_CONNECTION_MESSAGE* object\n\n Constructing an event in this way populates the event metadata.\n \"\"\"\n if EventType(c_message.type) != cls._EVENT_TYPE:\n raise ValueError(\"Incorect event type\")\n\n event = cls(getattr(c_message, cls._EVENT_ATTRIBUTE))\n event._metadata = EventMetadata(c_message)\n return event\n\n @classmethod\n def _get_event_cdata(cls, c_message):\n return getattr(c_message, cls._EVENT_ATTRIBUTE)\n\n @property\n def metadata(self):\n return self._metadata\n\n @property\n def type(self):\n return self._EVENT_TYPE"
},
{
"identifier": "create_exception",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "def create_exception(result: LeapRS, *args, **kwargs):\n \"\"\"Create an exception from a LeapRS object\n\n Extra args and kwargs are forwarded to the Exception constructor.\n\n :param result: The result to create an Exception from\n \"\"\"\n if result == LeapRS.Success:\n raise ValueError(\"Success is not an Error\")\n\n _ERRORS = {\n LeapRS.UnknownError: LeapUnknownError,\n LeapRS.InvalidArgument: LeapInvalidArgumentError,\n LeapRS.InsufficientResources: LeapInsufficientResourcesError,\n LeapRS.InsufficientBuffer: LeapInsufficientBufferError,\n LeapRS.Timeout: LeapTimeoutError,\n LeapRS.NotConnected: LeapNotConnectedError,\n LeapRS.HandshakeIncomplete: LeapHandshakeIncompleteError,\n LeapRS.BufferSizeOverflow: LeapBufferSizeOverflowError,\n LeapRS.ProtocolError: LeapProtocolError,\n LeapRS.InvalidClientID: LeapInvalidClientIDError,\n LeapRS.UnexpectedClosed: LeapUnexpectedClosedError,\n LeapRS.UnknownImageFrameRequest: LeapUnknownImageFrameRequestError,\n LeapRS.RoutineIsNotSeer: LeapRoutineIsNotSeerError,\n LeapRS.TimestampTooEarly: LeapTimestampTooEarlyError,\n LeapRS.ConcurrentPoll: LeapConcurrentPollError,\n LeapRS.NotAvailable: LeapNotAvailableError,\n LeapRS.NotStreaming: LeapNotStreamingError,\n LeapRS.CannotOpenDevice: LeapCannotOpenDeviceError,\n }\n\n return _ERRORS[result](args, kwargs)"
},
{
"identifier": "success_or_raise",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "def success_or_raise(func, *args):\n \"\"\"Call the function with the args, and raise an exception if the result is not success\n\n The function must be a LeapC cffi function which returns a LeapRS object.\n \"\"\"\n result = LeapRS(func(*args))\n if result != LeapRS.Success:\n raise create_exception(result)"
},
{
"identifier": "LeapError",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "class LeapError(Exception):\n pass"
},
{
"identifier": "LeapConnectionAlreadyOpen",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "class LeapConnectionAlreadyOpen(LeapError):\n pass"
},
{
"identifier": "LeapConcurrentPollError",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "class LeapConcurrentPollError(LeapError):\n pass"
},
{
"identifier": "LeapNotConnectedError",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "class LeapNotConnectedError(LeapError):\n pass"
},
{
"identifier": "LeapTimeoutError",
"path": "leapc-python-api/src/leap/exceptions.py",
"snippet": "class LeapTimeoutError(LeapError):\n pass"
}
] | from contextlib import contextmanager
from typing import Dict, Optional, List, Callable
from timeit import default_timer as timer
from leapc_cffi import ffi, libleapc
from .device import Device
from .enums import (
ConnectionStatus,
EventType,
RS as LeapRS,
ConnectionConfig as ConnectionConfigEnum,
TrackingMode,
PolicyFlag,
)
from .event_listener import LatestEventListener, Listener
from .events import create_event, Event
from .exceptions import (
create_exception,
success_or_raise,
LeapError,
LeapConnectionAlreadyOpen,
LeapConcurrentPollError,
LeapNotConnectedError,
LeapTimeoutError,
)
import sys
import threading
import time
import json | 3,911 |
class ConnectionConfig:
"""Configuration for a Connection
Allows a user to enable multi device functionality prior to connection.
"""
def __init__(
self,
*,
server_namespace: Optional[Dict[str, str]] = None,
multi_device_aware: bool = False,
):
self._data_ptr = ffi.new("LEAP_CONNECTION_CONFIG*")
self._data_ptr.server_namespace = server_namespace
self._data_ptr.flags = 0
self._data_ptr.size = ffi.sizeof(self._data_ptr[0])
if multi_device_aware:
self._data_ptr.flags |= ConnectionConfigEnum.MultiDeviceAware.value
class Connection:
"""Connection to a Leap Server
:param listeners: A List of event listeners. Defaults to None
:param poll_timeout: A timeout of poll messages, in seconds. Defaults to 1 second.
:param response_timeout: A timeout to wait for specific events in response to events.
Defaults to 10 seconds.
"""
def __init__(
self,
*,
server_namespace: Optional[Dict[str, str]] = None,
multi_device_aware: bool = False,
listeners: Optional[List[Listener]] = None,
poll_timeout: float = 1,
response_timeout: float = 10,
):
if listeners is None:
listeners = []
self._listeners = listeners
self._connection_ptr = self._create_connection(server_namespace, multi_device_aware)
self._poll_timeout = int(poll_timeout * 1000) # Seconds to milliseconds
self._response_timeout = int(response_timeout)
self._stop_poll_flag = False
self._is_open = False
self._poll_thread = None
def __del__(self):
# Since 'destroy_connection' only tells C to free the memory that it allocated
# for our connection, it is appropriate to leave the deletion of this to the garbage
# collector.
if hasattr(self, "_connection_ptr"):
# We have this 'if' statement to deal with the possibility that an Exception
# could be raised in the __init__ method, before this has been assigned.
self._destroy_connection(self._connection_ptr)
def add_listener(self, listener: Listener):
self._listeners.append(listener)
def remove_listener(self, listener: Listener):
self._listeners.remove(listener)
def poll(self, timeout: Optional[float] = None) -> Event:
"""Manually poll the connection from this thread
Do not notify listeners about the result of this poll.
:param timeout: The timeout of the poll, in seconds.
Defaults to the number the Connection was initialised with.
"""
if self._poll_thread is not None:
raise LeapConcurrentPollError
if timeout is None:
timeout = self._poll_timeout
else:
timeout = int(timeout * 1000) # Seconds to milliseconds
event_ptr = ffi.new("LEAP_CONNECTION_MESSAGE*")
success_or_raise(libleapc.LeapPollConnection, self._connection_ptr[0], timeout, event_ptr)
|
class ConnectionConfig:
"""Configuration for a Connection
Allows a user to enable multi-device functionality prior to connection.
"""
def __init__(
self,
*,
server_namespace: Optional[Dict[str, str]] = None,
multi_device_aware: bool = False,
):
self._data_ptr = ffi.new("LEAP_CONNECTION_CONFIG*")
self._data_ptr.server_namespace = server_namespace
self._data_ptr.flags = 0
self._data_ptr.size = ffi.sizeof(self._data_ptr[0])
if multi_device_aware:
self._data_ptr.flags |= ConnectionConfigEnum.MultiDeviceAware.value
class Connection:
"""Connection to a Leap Server
:param listeners: A list of event listeners. Defaults to None.
:param poll_timeout: The timeout for poll messages, in seconds. Defaults to 1 second.
:param response_timeout: A timeout, in seconds, to wait for specific expected response events.
Defaults to 10 seconds.
"""
def __init__(
self,
*,
server_namespace: Optional[Dict[str, str]] = None,
multi_device_aware: bool = False,
listeners: Optional[List[Listener]] = None,
poll_timeout: float = 1,
response_timeout: float = 10,
):
if listeners is None:
listeners = []
self._listeners = listeners
self._connection_ptr = self._create_connection(server_namespace, multi_device_aware)
self._poll_timeout = int(poll_timeout * 1000) # Seconds to milliseconds
self._response_timeout = int(response_timeout)
self._stop_poll_flag = False
self._is_open = False
self._poll_thread = None
def __del__(self):
# Since 'destroy_connection' only tells C to free the memory that it allocated
# for our connection, it is appropriate to leave the deletion of this to the garbage
# collector.
if hasattr(self, "_connection_ptr"):
# We have this 'if' statement to deal with the possibility that an Exception
# could be raised in the __init__ method, before this has been assigned.
self._destroy_connection(self._connection_ptr)
def add_listener(self, listener: Listener):
self._listeners.append(listener)
def remove_listener(self, listener: Listener):
self._listeners.remove(listener)
def poll(self, timeout: Optional[float] = None) -> Event:
"""Manually poll the connection from this thread
Do not notify listeners about the result of this poll.
:param timeout: The timeout of the poll, in seconds.
Defaults to the number the Connection was initialised with.
"""
if self._poll_thread is not None:
raise LeapConcurrentPollError
if timeout is None:
timeout = self._poll_timeout
else:
timeout = int(timeout * 1000) # Seconds to milliseconds
event_ptr = ffi.new("LEAP_CONNECTION_MESSAGE*")
success_or_raise(libleapc.LeapPollConnection, self._connection_ptr[0], timeout, event_ptr) | return create_event(event_ptr) | 9 | 2023-11-08 13:35:40+00:00 | 8k |
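A minimal usage sketch for the Connection/poll API shown in the record above (illustrative only: the `leap.connection` and `leap.exceptions` module paths, the need for a running tracking service, and the `LeapTimeoutError` handling are assumptions not confirmed by the snippet):

from leap.connection import Connection
from leap.exceptions import LeapTimeoutError

conn = Connection(poll_timeout=1, response_timeout=10)
try:
    # poll() converts a seconds timeout to milliseconds internally and returns an Event
    event = conn.poll(timeout=0.5)
    print(type(event).__name__)
except LeapTimeoutError:
    print("no tracking event arrived within 0.5 s")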
UMass-Foundation-Model/CoVLM | transformers/src/transformers/trainer_utils.py | [
{
"identifier": "ExplicitEnum",
"path": "transformers/src/transformers/utils/generic.py",
"snippet": "class ExplicitEnum(str, Enum):\n \"\"\"\n Enum with more explicit error message for missing values.\n \"\"\"\n\n @classmethod\n def _missing_(cls, value):\n raise ValueError(\n f\"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}\"\n )"
},
{
"identifier": "is_psutil_available",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "def is_psutil_available():\n return _psutil_available"
},
{
"identifier": "is_tf_available",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "def is_tf_available():\n return _tf_available"
},
{
"identifier": "is_torch_available",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "def is_torch_available():\n return _torch_available"
},
{
"identifier": "is_torch_cuda_available",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "def is_torch_cuda_available():\n if is_torch_available():\n import torch\n\n return torch.cuda.is_available()\n else:\n return False"
},
{
"identifier": "is_torch_mps_available",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "def is_torch_mps_available():\n if is_torch_available():\n import torch\n\n if hasattr(torch.backends, \"mps\"):\n return torch.backends.mps.is_available()\n return False"
},
{
"identifier": "is_torch_npu_available",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "@lru_cache()\ndef is_torch_npu_available(check_device=False):\n \"Checks if `torch_npu` is installed and potentially if a NPU is in the environment\"\n if not _torch_available or importlib.util.find_spec(\"torch_npu\") is None:\n return False\n\n import torch\n import torch_npu # noqa: F401\n\n if check_device:\n try:\n # Will raise a RuntimeError if no NPU is found\n _ = torch.npu.device_count()\n return torch.npu.is_available()\n except RuntimeError:\n return False\n return hasattr(torch, \"npu\") and torch.npu.is_available()"
},
{
"identifier": "is_torch_tpu_available",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "@lru_cache()\ndef is_torch_tpu_available(check_device=True):\n \"Checks if `torch_xla` is installed and potentially if a TPU is in the environment\"\n if not _torch_available:\n return False\n if importlib.util.find_spec(\"torch_xla\") is not None:\n if check_device:\n # We need to check if `xla_device` can be found, will raise a RuntimeError if not\n try:\n import torch_xla.core.xla_model as xm\n\n _ = xm.xla_device()\n return True\n except RuntimeError:\n return False\n return True\n return False"
},
{
"identifier": "is_torch_xpu_available",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "@lru_cache\ndef is_torch_xpu_available(check_device=False):\n \"Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment\"\n if not is_ipex_available():\n return False\n\n import intel_extension_for_pytorch # noqa: F401\n import torch\n\n if check_device:\n try:\n # Will raise a RuntimeError if no XPU is found\n _ = torch.xpu.device_count()\n return torch.xpu.is_available()\n except RuntimeError:\n return False\n return hasattr(torch, \"xpu\") and torch.xpu.is_available()"
},
{
"identifier": "requires_backends",
"path": "transformers/src/transformers/utils/import_utils.py",
"snippet": "def requires_backends(obj, backends):\n if not isinstance(backends, (list, tuple)):\n backends = [backends]\n\n name = obj.__name__ if hasattr(obj, \"__name__\") else obj.__class__.__name__\n\n # Raise an error for users who might not realize that classes without \"TF\" are torch-only\n if \"torch\" in backends and \"tf\" not in backends and not is_torch_available() and is_tf_available():\n raise ImportError(PYTORCH_IMPORT_ERROR_WITH_TF.format(name))\n\n # Raise the inverse error for PyTorch users trying to load TF classes\n if \"tf\" in backends and \"torch\" not in backends and is_torch_available() and not is_tf_available():\n raise ImportError(TF_IMPORT_ERROR_WITH_PYTORCH.format(name))\n\n checks = (BACKENDS_MAPPING[backend] for backend in backends)\n failed = [msg.format(name) for available, msg in checks if not available()]\n if failed:\n raise ImportError(\"\".join(failed))"
}
] | import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
import numpy as np
import torch
import tensorflow as tf
import torch_xla.core.xla_model as xm
import torch_xla.core.xla_model as xm
import torch
import psutil # noqa
import torch
import torch
import torch
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
from .utils import (
ExplicitEnum,
is_psutil_available,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_mps_available,
is_torch_npu_available,
is_torch_tpu_available,
is_torch_xpu_available,
requires_backends,
)
from .integrations import is_optuna_available
from .integrations import is_ray_tune_available
from ray import tune
from .integrations import is_wandb_available
from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size | 3,676 | raise ImportError("This function needs wandb installed: `pip install wandb`")
return {
"method": "random",
"metric": {"name": "objective", "goal": "minimize"},
"parameters": {
"learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6},
"seed": {"distribution": "int_uniform", "min": 1, "max": 40},
"per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]},
},
}
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
SIGOPT = "sigopt"
WANDB = "wandb"
def is_main_process(local_rank):
"""
Whether or not the current process is the main process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available(check_device=True):
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available(check_device=True):
return xm.xrt_world_size()
elif local_rank != -1 and is_torch_available():
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
- num_steps: number of steps processed
- num_tokens: number of tokens processed
"""
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if runtime == 0:
return result
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
if num_steps is not None:
steps_per_second = num_steps / runtime
result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
if num_tokens is not None:
tokens_per_second = num_tokens / runtime
result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3)
return result
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
INVERSE_SQRT = "inverse_sqrt"
REDUCE_ON_PLATEAU = "reduce_lr_on_plateau"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip memory tracking unless `psutil` is available. Install with `pip install psutil`.
When a stage completes, it can pass a metrics dict to be updated with the memory metrics gathered during this stage.
Example:
```python
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
```
At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"_inner_training_loop": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
| # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
if is_torch_available():
if is_tf_available():
def seed_worker(_):
"""
Helper function to set worker seed during Dataloader initialization.
"""
worker_seed = torch.initial_seed() % 2**32
set_seed(worker_seed)
def enable_full_determinism(seed: int, warn_only: bool = False):
"""
Helper function for reproducible behavior during distributed training. See
- https://pytorch.org/docs/stable/notes/randomness.html for pytorch
- https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow
"""
# set seed first
set_seed(seed)
if is_torch_available():
# Enable PyTorch deterministic mode. This potentially requires either the environment
# variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
# depending on the CUDA version, so we set them both here
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.use_deterministic_algorithms(True, warn_only=warn_only)
# Enable CUDNN deterministic mode
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if is_tf_available():
tf.config.experimental.enable_op_determinism()
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed).
Args:
seed (`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_torch_npu_available():
torch.npu.manual_seed_all(seed)
if is_torch_xpu_available():
torch.xpu.manual_seed_all(seed)
if is_tf_available():
tf.random.set_seed(seed)
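# Illustrative sketch only: the helper name `_seed_usage_example`, the seed value, and the
# `warn_only` choice are hypothetical; it simply shows how the two helpers above are called.
def _seed_usage_example():
    set_seed(42)  # seeds python's `random`, numpy, torch (and tf when installed)
    enable_full_determinism(42, warn_only=True)  # additionally forces deterministic torch kernels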
class EvalPrediction:
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (`np.ndarray`): Predictions of the model.
label_ids (`np.ndarray`): Targets to be matched.
inputs (`np.ndarray`, *optional*)
"""
def __init__(
self,
predictions: Union[np.ndarray, Tuple[np.ndarray]],
label_ids: Union[np.ndarray, Tuple[np.ndarray]],
inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None,
):
self.predictions = predictions
self.label_ids = label_ids
self.inputs = inputs
def __iter__(self):
if self.inputs is not None:
return iter((self.predictions, self.label_ids, self.inputs))
else:
return iter((self.predictions, self.label_ids))
def __getitem__(self, idx):
if idx < 0 or idx > 2:
raise IndexError("tuple index out of range")
if idx == 2 and self.inputs is None:
raise IndexError("tuple index out of range")
if idx == 0:
return self.predictions
elif idx == 1:
return self.label_ids
elif idx == 2:
return self.inputs
class EvalLoopOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
num_samples: Optional[int]
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
global_step: int
training_loss: float
metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
END = "end"
EVERY_SAVE = "every_save"
CHECKPOINT = "checkpoint"
ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
"""
The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]).
Parameters:
run_id (`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (`float`):
The objective that was obtained for this run.
hyperparameters (`Dict[str, Any]`):
The hyperparameters picked to get this run.
run_summary (`Optional[Any]`):
A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
run_summary: Optional[Any] = None
def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the [`Trainer`], the sum of all metrics otherwise.
Args:
metrics (`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [
m
for m in metrics.keys()
if m.endswith("_runtime") or m.endswith("_per_second") or m.endswith("_compilation_time")
]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values())
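# Illustrative sketch only (hypothetical helper): shows what `default_compute_objective`
# returns when only an evaluation loss plus speed metrics are present.
def _objective_example():
    metrics = {"eval_loss": 0.25, "epoch": 3.0, "eval_runtime": 12.3}
    # "epoch" and "*_runtime" entries are stripped, no other metrics remain, so the loss is returned
    assert default_compute_objective(metrics) == 0.25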
def default_hp_space_optuna(trial) -> Dict[str, float]:
assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
"seed": trial.suggest_int("seed", 1, 40),
"per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
}
def default_hp_space_ray(trial) -> Dict[str, float]:
assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`"
return {
"learning_rate": tune.loguniform(1e-6, 1e-4),
"num_train_epochs": tune.choice(list(range(1, 6))),
"seed": tune.uniform(1, 40),
"per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
}
def default_hp_space_sigopt(trial):
return [
{"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"},
{"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
{"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
{
"categorical_values": ["4", "8", "16", "32", "64"],
"name": "per_device_train_batch_size",
"type": "categorical",
},
]
def default_hp_space_wandb(trial) -> Dict[str, float]:
if not is_wandb_available():
raise ImportError("This function needs wandb installed: `pip install wandb`")
return {
"method": "random",
"metric": {"name": "objective", "goal": "minimize"},
"parameters": {
"learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6},
"seed": {"distribution": "int_uniform", "min": 1, "max": 40},
"per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]},
},
}
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
SIGOPT = "sigopt"
WANDB = "wandb"
def is_main_process(local_rank):
"""
Whether or not the current process is the main process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available(check_device=True):
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available(check_device=True):
return xm.xrt_world_size()
elif local_rank != -1 and is_torch_available():
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
- num_steps: number of steps processed
- num_tokens: number of tokens processed
"""
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if runtime == 0:
return result
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
if num_steps is not None:
steps_per_second = num_steps / runtime
result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
if num_tokens is not None:
tokens_per_second = num_tokens / runtime
result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3)
return result
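# Illustrative sketch only (hypothetical helper and arbitrary counts): take a time snapshot
# before the measured operation and call `speed_metrics` right after it finishes.
def _speed_metrics_example():
    start = time.time()
    # ... run evaluation here ...
    metrics = speed_metrics("eval", start, num_samples=1000, num_steps=125)
    # e.g. {"eval_runtime": ..., "eval_samples_per_second": ..., "eval_steps_per_second": ...}
    return metrics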
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
INVERSE_SQRT = "inverse_sqrt"
REDUCE_ON_PLATEAU = "reduce_lr_on_plateau"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip memory tracking unless `psutil` is available. Install with `pip install psutil`.
When a stage completes, it can pass a metrics dict to be updated with the memory metrics gathered during this stage.
Example:
```python
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
```
At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"_inner_training_loop": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
| if not is_psutil_available(): | 1 | 2023-11-07 04:23:57+00:00 | 8k |
HKU-BAL/ClairS-TO | src/compare_vcf.py | [
{
"identifier": "str2bool",
"path": "shared/utils.py",
"snippet": "def str2bool(v):\n if v is None:\n return v\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'ture', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'flase', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')"
},
{
"identifier": "str_none",
"path": "shared/utils.py",
"snippet": "def str_none(v):\n if v is None:\n return None\n if v.upper() == \"NONE\":\n return None\n if isinstance(v, str):\n return v"
},
{
"identifier": "VcfReader",
"path": "shared/vcf.py",
"snippet": "class VcfReader(object):\n def __init__(self, vcf_fn,\n ctg_name=None,\n ctg_start=None,\n ctg_end=None,\n is_var_format=False,\n is_happy_format=False,\n is_fp=None,\n show_ref=True,\n direct_open=False,\n keep_row_str=False,\n skip_genotype=False,\n filter_tag=None,\n taf_filter=None,\n save_header=False,\n min_qual=None,\n max_qual=None,\n discard_indel=False,\n keep_af=False):\n self.vcf_fn = vcf_fn\n self.ctg_name = ctg_name\n self.ctg_start = ctg_start\n self.ctg_end = ctg_end\n self.variant_dict = defaultdict(Position)\n self.is_var_format = is_var_format\n self.is_happy_format = is_happy_format\n self.is_fp = is_fp\n self.show_ref = show_ref\n self.direct_open = direct_open\n self.keep_row_str = keep_row_str\n self.skip_genotype = skip_genotype\n self.filter_tag = filter_tag # PASS;HighConf PASS;MedConf in hcc1395\n self.taf_filter = taf_filter\n self.header = \"\"\n self.save_header = save_header\n self.discard_indel = discard_indel\n self.min_qual = min_qual\n self.max_qual = max_qual\n self.keep_af = keep_af\n\n def read_vcf(self):\n is_ctg_region_provided = self.ctg_start is not None and self.ctg_end is not None\n\n if self.vcf_fn is None or not os.path.exists(self.vcf_fn):\n return\n\n header_last_column = []\n if self.direct_open:\n vcf_fp = open(self.vcf_fn)\n vcf_fo = vcf_fp\n else:\n vcf_fp = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (self.vcf_fn)))\n vcf_fo = vcf_fp.stdout\n for row in vcf_fo:\n columns = row.strip().split()\n if columns[0][0] == \"#\":\n if self.save_header:\n self.header += row\n header_last_column = columns\n continue\n\n tumor_in_last = True if len(header_last_column) and header_last_column[\n -1].rstrip().lower() == \"tumor\" else False\n # position in vcf is 1-based\n chromosome, position = columns[0], columns[1]\n if self.ctg_name is not None and chromosome != self.ctg_name:\n continue\n if is_ctg_region_provided and not (self.ctg_start <= int(position) <= self.ctg_end):\n continue\n\n FILTER = columns[6] if len(columns) >= 7 else None\n if self.filter_tag is not None:\n filter_list = self.filter_tag.split(',')\n if sum([1 if filter == FILTER else 0 for filter in filter_list]) == 0:\n continue\n self.is_var_format = True if columns[2][0] in 'ACGT' else False\n self.is_var_format = False\n if self.is_var_format:\n reference, alternate = columns[2], columns[3]\n genotype_1 = int(columns[4])\n genotype_2 = int(columns[5])\n else:\n reference, alternate, last_column = columns[3], columns[4], columns[-1]\n\n if self.discard_indel and (len(reference) > 1 or len(alternate) > 1):\n continue\n\n try:\n qual = columns[5] if len(columns) > 5 else None\n\n if self.min_qual is not None and float(qual) < self.min_qual:\n continue\n\n if self.max_qual is not None and float(qual) > self.max_qual:\n continue\n except:\n qual = None\n\n last_column = last_column if not tumor_in_last else columns[-2]\n if self.is_happy_format and self.is_fp:\n last_column = columns[10]\n if self.is_happy_format and not self.is_fp:\n last_column = columns[9]\n genotype = last_column.split(\":\")[0].replace(\"/\", \"|\").replace(\".\", \"0\").split(\"|\")\n try:\n genotype_1, genotype_2 = genotype\n\n if int(genotype_1) > int(genotype_2):\n genotype_1, genotype_2 = genotype_2, genotype_1\n\n # remove * to guarentee vcf match\n if '*' in alternate:\n alternate = alternate.split(',')\n if int(genotype_1) + int(genotype_2) != 3 or len(alternate) != 2:\n print('error with variant representation')\n continue\n alternate = ''.join([alt_base for alt_base in alternate if 
alt_base != '*'])\n # * always have a genotype 1/2\n\n genotype_1, genotype_2 = '0', '1'\n except:\n genotype_1 = -1\n genotype_2 = -1\n if self.keep_af:\n tag_list = columns[8].split(':')\n if 'AF' in tag_list or 'VAF' in tag_list:\n taf_index = tag_list.index('AF') if 'AF' in tag_list else tag_list.index('VAF')\n taf = float(columns[9].split(':')[taf_index])\n else:\n taf = None\n else:\n taf = None\n position = int(position)\n have_extra_infos = 'VT' in row\n\n if genotype_1 == \"0\" and genotype_2 == \"0\" and not self.show_ref and not self.skip_genotype:\n continue\n extra_infos = columns[-1].split(':')[-1] if have_extra_infos else ''\n row_str = row if self.keep_row_str else False\n key = (chromosome, position) if self.ctg_name is None else position\n\n self.variant_dict[key] = Position(ctg_name=chromosome,\n pos=position,\n ref_base=reference,\n alt_base=alternate,\n genotype1=int(genotype_1),\n genotype2=int(genotype_2),\n qual=qual,\n row_str=row_str,\n af=taf,\n filter=FILTER,\n extra_infos=extra_infos)\n\n def get_alt_info(self, pos, extra_info=\"\"):\n pos = int(pos)\n if pos not in self.variant_dict:\n return \"\"\n ref_base = self.variant_dict[pos].reference_bases\n alt_base = ','.join(self.variant_dict[pos].alternate_bases)\n gentoype_str = '/'.join([str(g) for g in self.variant_dict[pos].genotype])\n extra_info = self.variant_dict[pos].extra_infos if self.variant_dict[pos].extra_infos != \"\" else extra_info\n return extra_info + '_' + ref_base + '_' + alt_base + '_' + gentoype_str"
},
{
"identifier": "VcfWriter",
"path": "shared/vcf.py",
"snippet": "class VcfWriter(object):\n def __init__(self,\n vcf_fn,\n ctg_name=None,\n ref_fn=None,\n sample_name=\"SAMPLE\",\n write_header=True,\n header=None,\n cmdline=None,\n show_ref_calls=False):\n self.vcf_fn = vcf_fn\n self.show_ref_calls = show_ref_calls\n # make directory if not exist\n vcf_folder = os.path.dirname(self.vcf_fn)\n if not os.path.exists(vcf_folder):\n print(\"[INFO] Output VCF folder {} not found, create it\".format(vcf_folder))\n return_code = run(\"mkdir -p {}\".format(vcf_folder), shell=True)\n\n self.vcf_writer = open(self.vcf_fn, 'w')\n self.ref_fn = ref_fn\n self.ctg_name = ctg_name\n if ctg_name is not None:\n self.ctg_name_list = ctg_name.split(',') if ',' in ctg_name else [ctg_name]\n else:\n self.ctg_name_list = None\n self.sample_name = sample_name\n if write_header:\n self.write_header(ref_fn=ref_fn, header=header, cmdline=cmdline)\n\n def close(self):\n try:\n self.vcf_writer.close()\n except:\n pass\n\n def write_header(self, ctg_name=None, ref_fn=None, header=None, cmdline=None):\n header = vcf_header if header is None else header\n if cmdline is not None and cmdline != \"\":\n header_list = header.rstrip('\\n').split('\\n')\n insert_index = 3 if len(header_list) >= 3 else len(header_list) - 1\n header_list.insert(insert_index, \"##cmdline={}\".format(cmdline))\n header = \"\\n\".join(header_list) + '\\n'\n if self.ref_fn is not None:\n reference_index_file_path = file_path_from(self.ref_fn, suffix=\".fai\", exit_on_not_found=True, sep='.')\n with open(reference_index_file_path, \"r\") as fai_fp:\n for row in fai_fp:\n columns = row.strip().split(\"\\t\")\n contig_name, contig_size = columns[0], columns[1]\n if self.ctg_name_list is not None and contig_name not in self.ctg_name_list:\n continue\n header += \"##contig=<ID=%s,length=%s>\\n\" % (contig_name, contig_size)\n\n header += '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\n' % (self.sample_name)\n\n self.vcf_writer.write(header)\n\n def write_row(self,\n POS=None,\n REF=None,\n ALT=None,\n QUAL=0,\n GT='0/0',\n DP=0,\n AF=0,\n AD=None,\n CHROM=None,\n GQ=None,\n ID='.',\n FILTER=\".\",\n INFO='.',\n TAF=None,\n VT=None,\n TDP=None,\n AU=None,\n CU=None,\n GU=None,\n TU=None,\n row_str=None):\n if row_str is not None:\n self.vcf_writer.write(row_str)\n return\n GQ = GQ if GQ else QUAL\n CHROM = CHROM if CHROM else self.ctg_name\n if not self.show_ref_calls and (GT == \"0/0\" or GT == \"./.\"):\n return\n FORMAT = \"GT:GQ:DP:AF\"\n FORMAT_V = \"%s:%.4f:%d:%.4f\" % (GT, GQ, DP, AF)\n basic_vcf_format = \"%s\\t%d\\t%s\\t%s\\t%s\\t%.4f\\t%s\\t%s\" % (\n CHROM,\n int(POS),\n ID,\n REF,\n ALT,\n QUAL,\n FILTER,\n INFO\n )\n if AD is not None and AD != \"\":\n FORMAT += \":AD\"\n FORMAT_V += \":%s\" % (AD)\n if TAF is not None:\n FORMAT += \":TAF\"\n FORMAT_V += \":%.4f\" % (TAF)\n if TDP is not None:\n FORMAT += \":TDP\"\n FORMAT_V += \":%d\" % (TDP)\n if AU is not None and CU is not None and GU is not None and TU is not None:\n FORMAT += \":AU:CU:GU:TU\"\n FORMAT_V += \":%d:%d:%d:%d\" % (AU, CU, GU, TU)\n\n if VT is not None:\n FORMAT += \":VT\"\n FORMAT_V += \":%s\" % (VT)\n vcf_format = '\\t'.join([basic_vcf_format, FORMAT, FORMAT_V]) + \"\\n\"\n\n self.vcf_writer.write(vcf_format)"
},
{
"identifier": "bed_tree_from",
"path": "shared/interval_tree.py",
"snippet": "def bed_tree_from(bed_file_path,\n expand_region=None,\n contig_name=None,\n bed_ctg_start=None,\n bed_ctg_end=None,\n return_bed_region=False,\n padding=None,\n region=None):\n \"\"\"\n 0-based interval tree [start, end)\n \"\"\"\n\n tree = {}\n if region is not None:\n try:\n ctg_name, start_end = region.split(':')\n ctg_start, ctg_end = int(start_end.split('-')[0]) - 1, int(start_end.split('-')[1]) - 1 # bed format\n except:\n sys.exit(\"[ERROR] Please input the correct format for --region ctg_name:start-end, your input is {}\".format(region))\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid region input: {}\".format(region))\n\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n tree[ctg_name].addi(ctg_start, ctg_end)\n if return_bed_region:\n return tree, None, None\n return tree\n\n if bed_file_path is None or bed_file_path == \"\":\n if return_bed_region:\n return tree, None, None\n return tree\n\n bed_start, bed_end = float('inf'), 0\n unzip_process = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (bed_file_path)))\n for row_id, row in enumerate(unzip_process.stdout):\n if row[0] == '#':\n continue\n columns = row.strip().split()\n\n ctg_name = columns[0]\n if contig_name != None and ctg_name != contig_name:\n continue\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n\n ctg_start, ctg_end = int(columns[1]), int(columns[2])\n\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid bed input in {}-th row {} {} {}\".format(row_id+1, ctg_name, ctg_start, ctg_end))\n\n if bed_ctg_start and bed_ctg_end:\n if ctg_end < bed_ctg_start or ctg_start > bed_ctg_end:\n continue\n if padding:\n ctg_start += padding\n ctg_end -= padding\n bed_start = min(ctg_start, bed_start)\n bed_end = max(ctg_end, bed_end)\n if ctg_start == ctg_end:\n ctg_end += 1\n\n tree[ctg_name].addi(ctg_start, ctg_end)\n\n unzip_process.stdout.close()\n unzip_process.wait()\n if return_bed_region:\n return tree, bed_start, bed_end\n return tree"
},
{
"identifier": "is_region_in",
"path": "shared/interval_tree.py",
"snippet": "def is_region_in(tree, contig_name, region_start=None, region_end=None):\n if not tree or (contig_name is None) or (contig_name not in tree):\n return False\n\n interval_tree = tree[contig_name]\n return len(\n interval_tree.at(region_start)\n if region_end is None else\n interval_tree.overlap(begin=region_start, end=region_end)\n ) > 0"
},
{
"identifier": "file_path_from",
"path": "shared/utils.py",
"snippet": "def file_path_from(file_name, suffix=\"\", exit_on_not_found=False, sep=\"\", allow_none=False, is_directory=False):\n if allow_none and file_name is None:\n return None\n if is_directory:\n is_folder_exists(file_name, suffix)\n if exit_on_not_found:\n exit(log_error(\"[ERROR] directory %s not found\" % (file_name + suffix)))\n if is_file_exists(file_name, suffix):\n return abspath(file_name + suffix)\n #allow fn.bam.bai->fn.bai fn.fa.fai->fn.fai\n elif sep != \"\" and len(sep) == 1:\n file_name_remove_suffix = sep.join(file_name.split(sep)[:-1])\n if is_file_exists(file_name_remove_suffix, suffix):\n return abspath(file_name_remove_suffix + suffix)\n if exit_on_not_found:\n exit(log_error(\"[ERROR] file %s not found\" % (file_name + suffix)))\n return None"
},
{
"identifier": "cal_af",
"path": "src/cal_af_distribution.py",
"snippet": "def cal_af(args, truth_variant_dict=None, input_variant_dict=None):\n ctg_name = args.ctg_name\n output_path = args.output_path\n\n if truth_variant_dict is None:\n truth_vcf_fn = args.truth_vcf_fn\n vcf_reader = VcfReader(vcf_fn=truth_vcf_fn,\n ctg_name=ctg_name,\n show_ref=False,\n keep_row_str=True,\n filter_tag=args.truth_filter_tag)\n vcf_reader.read_vcf()\n truth_variant_dict = vcf_reader.variant_dict\n\n if input_variant_dict is None:\n input_vcf_fn = args.input_vcf_fn\n vcf_reader = VcfReader(vcf_fn=input_vcf_fn,\n ctg_name=ctg_name,\n show_ref=False,\n keep_row_str=True,\n filter_tag=args.input_filter_tag)\n vcf_reader.read_vcf()\n input_variant_dict = vcf_reader.variant_dict\n\n results_dict = defaultdict()\n variant_dict = defaultdict()\n\n if output_path is not None:\n output_file = open(output_path, 'w')\n for k, v in truth_variant_dict.items():\n if k not in input_variant_dict:\n variant_dict[k] = v\n else:\n result = parser_info(input_variant_dict[k].row_str)\n if output_path is not None:\n output_file.write(' '.join(str(item) for item in result) + '\\n')\n else:\n key = k if args.ctg_name is None else (args.ctg_name, k)\n results_dict[key] = result\n\n min_mq = param.min_mq\n min_bq = param.min_bq\n excl_flag = 2316\n\n phasing_option = \"--output-extra HP \" if args.phase_output else \" \"\n samtools_command = \"{} mpileup --min-MQ {} --min-BQ {} --excl-flags {} {} \".format(args.samtools,\n min_mq,\n min_bq,\n excl_flag,\n phasing_option)\n\n global tumor_samtools_command\n tumor_samtools_command = samtools_command + args.tumor_bam_fn\n\n total_num = 0\n print(\"[INFO] Total truth need to calculate AF: {}\".format(len(variant_dict)))\n\n with concurrent.futures.ProcessPoolExecutor(max_workers=args.threads) as exec:\n for result in exec.map(extract_base, list(variant_dict.values())):\n total_num += 1\n if total_num % 1000 == 0 and total_num > 0:\n print(\"[INFO] Total processed positions: {}\".format(total_num))\n if result is not None:\n ctg_name, pos, tumor_depth, tumor_alt_depth, HAP_LIST, ALL_HAP_LIST = result\n k = (ctg_name, int(pos))\n results_dict[k] = ctg_name, pos, tumor_depth, tumor_alt_depth, HAP_LIST, ALL_HAP_LIST\n if output_path is not None:\n output_file.write(' '.join(str(item) for item in result) + '\\n')\n\n if output_path is not None:\n output_file.close()\n return\n\n return results_dict"
}
] | import os
import sys
import subprocess
from argparse import ArgumentParser, SUPPRESS
from collections import defaultdict
from shared.utils import str2bool, str_none
from shared.vcf import VcfReader, VcfWriter
from shared.interval_tree import bed_tree_from, is_region_in
from shared.utils import file_path_from
from src.cal_af_distribution import cal_af | 6,130 | # BSD 3-Clause License
#
# Copyright 2023 The University of Hong Kong, Department of Computer Science
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
major_contigs_order = ["chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]] + [str(a) for a in
list(range(1, 23)) + ["X", "Y"]]
major_contigs = {"chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]}.union(
{str(a) for a in list(range(1, 23)) + ["X", "Y"]})
def sort_key(item):
order_map = {value: index for index, value in enumerate(major_contigs_order)}
chr = order_map[item[0]]
pos = item[1]
return (chr, pos)
def cal_metrics(tp, fp, fn):
precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
f1_score = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
return round(precision, 4), round(recall, 4), round(f1_score, 4)
def output_best_cut_off(fp_qual_dict, tp_qual_dict, fn_count, use_int_cut_off=True, add_tp_fn=False):
results = []
if use_int_cut_off:
qual_list = set([int(q) for q in list(fp_qual_dict.values()) + list(tp_qual_dict.values())])
else:
qual_list = [item / 100.0 for item in range(0, 101)]
for qual in qual_list:
fp_snv = sum([1 for k, v in fp_qual_dict.items() if v >= qual])
tp_snv = sum([1 for k, v in tp_qual_dict.items() if v >= qual])
fn_snv = fn_count + len(tp_qual_dict) - tp_snv
snv_pre, snv_rec, snv_f1 = cal_metrics(tp=tp_snv, fp=fp_snv, fn=fn_snv)
tp_fn = tp_snv + fn_snv
results.append([qual, snv_pre, snv_rec, snv_f1, tp_snv, fp_snv, fn_snv, tp_fn])
results = sorted(results, key=lambda x: x[3], reverse=True)
return results
def compare_vcf(args):
"""
Follow how som.py works
## https://github.com/Illumina/hap.py/blob/master/doc/sompy.md
"""
output_fn = args.output_fn
output_dir = args.output_dir
truth_vcf_fn = args.truth_vcf_fn
input_vcf_fn = args.input_vcf_fn
bed_fn = args.bed_fn
high_confident_only = args.high_confident_only
ctg_name = args.ctg_name
skip_genotyping = args.skip_genotyping
input_filter_tag = args.input_filter_tag
truth_filter_tag = args.truth_filter_tag
discard_fn_out_of_fp_bed = args.discard_fn_out_of_fp_bed
benchmark_indel = args.benchmark_indel
fp_bed_tree = bed_tree_from(bed_file_path=bed_fn, contig_name=ctg_name)
strat_bed_tree_list = []
if args.strat_bed_fn is not None and ',' in args.strat_bed_fn:
for strat_bed_fn in args.strat_bed_fn.split(','):
strat_bed_tree_list.append(bed_tree_from(bed_file_path=strat_bed_fn, contig_name=ctg_name))
elif args.strat_bed_fn is not None:
strat_bed_tree_list = [bed_tree_from(bed_file_path=args.strat_bed_fn, contig_name=ctg_name)]
truth_vcf_fn = file_path_from(file_name=truth_vcf_fn, exit_on_not_found=True, allow_none=False)
input_vcf_fn = file_path_from(file_name=input_vcf_fn, exit_on_not_found=True, allow_none=False)
| # BSD 3-Clause License
#
# Copyright 2023 The University of Hong Kong, Department of Computer Science
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
major_contigs_order = ["chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]] + [str(a) for a in
list(range(1, 23)) + ["X", "Y"]]
major_contigs = {"chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]}.union(
{str(a) for a in list(range(1, 23)) + ["X", "Y"]})
def sort_key(item):
order_map = {value: index for index, value in enumerate(major_contigs_order)}
chr = order_map[item[0]]
pos = item[1]
return (chr, pos)
def cal_metrics(tp, fp, fn):
precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
f1_score = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
return round(precision, 4), round(recall, 4), round(f1_score, 4)
def output_best_cut_off(fp_qual_dict, tp_qual_dict, fn_count, use_int_cut_off=True, add_tp_fn=False):
results = []
if use_int_cut_off:
qual_list = set([int(q) for q in list(fp_qual_dict.values()) + list(tp_qual_dict.values())])
else:
qual_list = [item / 100.0 for item in range(0, 101)]
for qual in qual_list:
fp_snv = sum([1 for k, v in fp_qual_dict.items() if v >= qual])
tp_snv = sum([1 for k, v in tp_qual_dict.items() if v >= qual])
fn_snv = fn_count + len(tp_qual_dict) - tp_snv
snv_pre, snv_rec, snv_f1 = cal_metrics(tp=tp_snv, fp=fp_snv, fn=fn_snv)
tp_fn = tp_snv + fn_snv
results.append([qual, snv_pre, snv_rec, snv_f1, tp_snv, fp_snv, fn_snv, tp_fn])
results = sorted(results, key=lambda x: x[3], reverse=True)
return results
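# Illustrative sketch only (hypothetical helper): a worked example of `cal_metrics`.
# With tp=90, fp=10, fn=30: precision = 90/100 = 0.9, recall = 90/120 = 0.75,
# and F1 = 2 * 0.9 * 0.75 / (0.9 + 0.75), which rounds to 0.8182.
def _cal_metrics_example():
    precision, recall, f1 = cal_metrics(tp=90, fp=10, fn=30)
    assert (precision, recall, f1) == (0.9, 0.75, 0.8182)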
def compare_vcf(args):
"""
Follow how som.py works
## https://github.com/Illumina/hap.py/blob/master/doc/sompy.md
"""
output_fn = args.output_fn
output_dir = args.output_dir
truth_vcf_fn = args.truth_vcf_fn
input_vcf_fn = args.input_vcf_fn
bed_fn = args.bed_fn
high_confident_only = args.high_confident_only
ctg_name = args.ctg_name
skip_genotyping = args.skip_genotyping
input_filter_tag = args.input_filter_tag
truth_filter_tag = args.truth_filter_tag
discard_fn_out_of_fp_bed = args.discard_fn_out_of_fp_bed
benchmark_indel = args.benchmark_indel
fp_bed_tree = bed_tree_from(bed_file_path=bed_fn, contig_name=ctg_name)
strat_bed_tree_list = []
if args.strat_bed_fn is not None and ',' in args.strat_bed_fn:
for strat_bed_fn in args.strat_bed_fn.split(','):
strat_bed_tree_list.append(bed_tree_from(bed_file_path=strat_bed_fn, contig_name=ctg_name))
elif args.strat_bed_fn is not None:
strat_bed_tree_list = [bed_tree_from(bed_file_path=args.strat_bed_fn, contig_name=ctg_name)]
truth_vcf_fn = file_path_from(file_name=truth_vcf_fn, exit_on_not_found=True, allow_none=False)
input_vcf_fn = file_path_from(file_name=input_vcf_fn, exit_on_not_found=True, allow_none=False)
| truth_vcf_reader = VcfReader(vcf_fn=truth_vcf_fn, | 2 | 2023-11-07 04:39:16+00:00 | 8k |
the-siesta-group/edfio | edfio/edf.py | [
{
"identifier": "RawHeaderFieldDate",
"path": "edfio/_header_field.py",
"snippet": "class RawHeaderFieldDate(RawHeaderField[datetime.date]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, field: bytes) -> datetime.date:\n date = decode_str(field)\n match = DATE_OR_TIME_PATTERN.fullmatch(date)\n if match is None:\n raise ValueError(f\"Invalid date for format DD.MM.YY: {date!r}\")\n day, month, year = (int(g) for g in match.groups())\n if year >= 85: # noqa: PLR2004\n year += 1900\n else:\n year += 2000\n return datetime.date(year, month, day)\n\n def encode(self, value: datetime.date) -> bytes:\n if not 1985 <= value.year <= 2084: # noqa: PLR2004\n raise ValueError(\"EDF only allows dates from 1985 to 2084\")\n return encode_str(value.strftime(\"%d.%m.%y\"), self.length)"
},
{
"identifier": "RawHeaderFieldFloat",
"path": "edfio/_header_field.py",
"snippet": "class RawHeaderFieldFloat(RawHeaderField[float]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, field: bytes) -> float:\n return decode_float(field)\n\n def encode(self, value: float) -> bytes:\n return encode_float(value, self.length)"
},
{
"identifier": "RawHeaderFieldInt",
"path": "edfio/_header_field.py",
"snippet": "class RawHeaderFieldInt(RawHeaderField[int]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, field: bytes) -> int:\n return int(decode_str(field))\n\n def encode(self, value: int) -> bytes:\n return encode_int(value, self.length)"
},
{
"identifier": "RawHeaderFieldStr",
"path": "edfio/_header_field.py",
"snippet": "class RawHeaderFieldStr(RawHeaderField[str]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, field: bytes) -> str:\n return decode_str(field)\n\n def encode(self, value: str) -> bytes:\n return encode_str(value, self.length)"
},
{
"identifier": "RawHeaderFieldTime",
"path": "edfio/_header_field.py",
"snippet": "class RawHeaderFieldTime(RawHeaderField[datetime.time]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, field: bytes) -> datetime.time:\n time = decode_str(field)\n match = DATE_OR_TIME_PATTERN.fullmatch(time)\n if match is None:\n raise ValueError(f\"Invalid time for format hh.mm.ss: {time!r}\")\n hours, minutes, seconds = (int(g) for g in match.groups())\n return datetime.time(hours, minutes, seconds)\n\n def encode(self, value: datetime.time) -> bytes:\n return encode_str(value.isoformat().replace(\":\", \".\"), self.length)"
},
{
"identifier": "encode_str",
"path": "edfio/_header_field.py",
"snippet": "def encode_str(value: str, length: int) -> bytes:\n if len(value) > length:\n raise ValueError(\n f\"{value!r} exceeds maximum field length: {len(value)} > {length}\"\n )\n if not value.isprintable():\n raise ValueError(f\"{value} contains non-printable characters\")\n return value.encode(\"ascii\").ljust(length)"
},
{
"identifier": "get_header_fields",
"path": "edfio/_header_field.py",
"snippet": "def get_header_fields(cls: type) -> Iterator[tuple[str, int]]:\n for name, value in cls.__dict__.items():\n if isinstance(value, RawHeaderField):\n yield name, value.length"
},
{
"identifier": "FloatRange",
"path": "edfio/_utils.py",
"snippet": "class FloatRange(NamedTuple):\n min: float\n max: float"
},
{
"identifier": "IntRange",
"path": "edfio/_utils.py",
"snippet": "class IntRange(NamedTuple):\n min: int\n max: int"
},
{
"identifier": "calculate_gain_and_offset",
"path": "edfio/_utils.py",
"snippet": "def calculate_gain_and_offset(\n digital_min: int,\n digital_max: int,\n physical_min: float,\n physical_max: float,\n) -> tuple[float, float]:\n gain = (physical_max - physical_min) / (digital_max - digital_min)\n offset = physical_max / gain - digital_max\n return gain, offset"
},
{
"identifier": "decode_edfplus_date",
"path": "edfio/_utils.py",
"snippet": "def decode_edfplus_date(date: str) -> datetime.date:\n day, month, year = date.split(\"-\")\n try:\n month_int = _MONTH_NAMES.index(month.upper()) + 1\n except ValueError:\n raise ValueError(f\"Invalid month: {month}, options: {_MONTH_NAMES}\") from None\n return datetime.date(int(year), month_int, int(day))"
},
{
"identifier": "encode_annotation_duration",
"path": "edfio/_utils.py",
"snippet": "def encode_annotation_duration(duration: float) -> str:\n if duration < 0:\n raise ValueError(f\"Annotation duration must be positive, is {duration}\")\n string = f\"{duration:.12f}\".rstrip(\"0\")\n if string[-1] == \".\":\n return string[:-1]\n return string"
},
{
"identifier": "encode_annotation_onset",
"path": "edfio/_utils.py",
"snippet": "def encode_annotation_onset(onset: float) -> str:\n string = f\"{onset:+.12f}\".rstrip(\"0\")\n if string[-1] == \".\":\n return string[:-1]\n return string"
},
{
"identifier": "encode_edfplus_date",
"path": "edfio/_utils.py",
"snippet": "def encode_edfplus_date(date: datetime.date) -> str:\n return f\"{date.day:02}-{_MONTH_NAMES[date.month - 1]}-{date.year:02}\""
},
{
"identifier": "repr_from_init",
"path": "edfio/_utils.py",
"snippet": "def repr_from_init(obj: Any) -> str:\n parameters = []\n for name in inspect.signature(obj.__class__).parameters:\n parameters.append(f\"{name}={getattr(obj, name)!r}\")\n return f\"{obj.__class__.__name__}({', '.join(parameters)})\""
},
{
"identifier": "round_float_to_8_characters",
"path": "edfio/_utils.py",
"snippet": "def round_float_to_8_characters(\n value: float,\n round_func: Callable[[float], int],\n) -> float:\n if isinstance(value, int) or value.is_integer():\n return value\n length = 8\n integer_part_length = str(value).find(\".\")\n if integer_part_length == length:\n return round_func(value)\n factor = 10 ** (length - 1 - integer_part_length)\n return round_func(value * factor) / factor"
},
{
"identifier": "validate_subfields",
"path": "edfio/_utils.py",
"snippet": "def validate_subfields(subfields: dict[str, str]) -> None:\n for key, value in subfields.items():\n if not value:\n raise ValueError(f\"Subfield {key} must not be an empty string\")\n if \" \" in value:\n raise ValueError(f\"Subfield {key} contains spaces: {value!r}\")"
}
] | import contextlib
import copy
import datetime
import io
import math
import re
import warnings
import numpy as np
import numpy.typing as npt
from collections.abc import Iterable, Sequence
from dataclasses import dataclass
from decimal import Decimal
from fractions import Fraction
from functools import singledispatch
from math import ceil, floor
from pathlib import Path
from typing import Any, Literal, NamedTuple
from edfio._header_field import (
RawHeaderFieldDate,
RawHeaderFieldFloat,
RawHeaderFieldInt,
RawHeaderFieldStr,
RawHeaderFieldTime,
encode_str,
get_header_fields,
)
from edfio._utils import (
FloatRange,
IntRange,
calculate_gain_and_offset,
decode_edfplus_date,
encode_annotation_duration,
encode_annotation_onset,
encode_edfplus_date,
repr_from_init,
round_float_to_8_characters,
validate_subfields,
) | 4,179 | index = seconds * signal.sampling_frequency
if index != int(index):
raise ValueError(
f"{seconds}s is not a sample time of signal {i} ({signal.label}) with fs={signal.sampling_frequency}Hz"
)
def _shift_startdatetime(self, seconds: float) -> None:
timedelta = datetime.timedelta(seconds=seconds)
try:
startdate = self.startdate
startdate_anonymized = False
except AnonymizedDateError:
startdate = datetime.date.fromtimestamp(0)
startdate_anonymized = True
startdatetime = datetime.datetime.combine(startdate, self.starttime)
startdatetime += timedelta
if not startdate_anonymized:
self.startdate = startdatetime.date()
self.starttime = startdatetime.time()
def copy(self) -> Edf:
"""
Create a deep copy of the Edf.
Returns
-------
Edf
The copied Edf object.
"""
return copy.deepcopy(self)
def _slice_annotations_signal(
self,
signal: EdfSignal,
*,
start: float,
stop: float,
keep_all_annotations: bool,
) -> EdfSignal:
is_timekeeping_signal = signal == self._timekeeping_signal
annotations: list[EdfAnnotation] = []
for data_record in signal._digital.reshape(
(-1, signal.samples_per_data_record)
):
annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
if is_timekeeping_signal:
annotations.extend(annot_dr.annotations[1:])
else:
annotations.extend(annot_dr.annotations)
annotations = [
EdfAnnotation(round(a.onset - start, 12), a.duration, a.text)
for a in annotations
if keep_all_annotations or start <= a.onset < stop
]
return _create_annotations_signal(
annotations,
num_data_records=self.num_data_records,
data_record_duration=self.data_record_duration,
with_timestamps=is_timekeeping_signal,
subsecond_offset=self._subsecond_offset + start - int(start),
)
def _create_annotations_signal(
annotations: Iterable[EdfAnnotation],
*,
num_data_records: int,
data_record_duration: float,
with_timestamps: bool = True,
subsecond_offset: float = 0,
) -> EdfSignal:
data_record_starts = np.arange(num_data_records) * data_record_duration
annotations = sorted(annotations)
data_records = []
for i, start in enumerate(data_record_starts):
end = start + data_record_duration
tals: list[_EdfTAL] = []
if with_timestamps:
tals.append(_EdfTAL(np.round(start + subsecond_offset, 12), None, [""]))
for ann in annotations:
if (
(i == 0 and ann.onset < 0)
or (i == (num_data_records - 1) and end <= ann.onset)
or (start <= ann.onset < end)
):
tals.append(
_EdfTAL(
np.round(ann.onset + subsecond_offset, 12),
ann.duration,
[ann.text],
)
)
data_records.append(_EdfAnnotationsDataRecord(tals).to_bytes())
maxlen = max(len(data_record) for data_record in data_records)
if maxlen % 2:
maxlen += 1
raw = b"".join(dr.ljust(maxlen, b"\x00") for dr in data_records)
divisor = data_record_duration if data_record_duration else 1
signal = EdfSignal(
np.arange(1.0), # placeholder signal, as argument `data` is non-optional
sampling_frequency=maxlen // 2 / divisor,
physical_range=(-32768, 32767),
)
signal._label = "EDF Annotations"
signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]
maxlen // 2
)
signal._digital = np.frombuffer(raw, dtype=np.int16).copy()
return signal
@dataclass
class _EdfTAL:
onset: float
duration: float | None
texts: list[str]
def to_bytes(self) -> bytes:
timing = encode_annotation_onset(self.onset)
if self.duration is not None:
| from __future__ import annotations
_ANNOTATIONS_PATTERN = re.compile(
"""
([+-]\\d+(?:\\.?\\d+)?) # onset
(?:\x15(\\d+(?:\\.?\\d+)?))? # duration, optional
(?:\x14(.*?)) # annotation texts
\x14\x00 # terminator
""",
re.VERBOSE,
)
class AnonymizedDateError(ValueError):
"""Raised when trying to access an anonymized startdate or birthdate."""
class EdfAnnotation(NamedTuple):
"""A single EDF+ annotation.
Parameters
----------
onset : float
The annotation onset in seconds from recording start.
duration : float | None
The annotation duration in seconds (`None` if annotation has no duration).
text : str
The annotation text, can be empty.
"""
onset: float
duration: float | None
text: str
class EdfSignal:
"""A single EDF signal.
Attributes that might break the signal or file on modification (i.e.,
`sampling_frequency`, `physical_range`, `digital_range`, `samples_per_data_record`,
and `reserved`) can not be set after instantiation.
To reduce memory consumption, signal data is always stored as a 16-bit integer array
containing the digital values that would be written to the corresponding EDF file.
Therefore, it is expected that `EdfSignal.data` does not match the physical
values passed during instantiation exactly.
Parameters
----------
data : npt.NDArray[np.float64]
The signal data (physical values).
sampling_frequency : float
The sampling frequency in Hz.
label : str, default: `""`
The signal's label, e.g., `"EEG Fpz-Cz"` or `"Body temp"`.
transducer_type : str, default: `""`
The transducer type, e.g., `"AgAgCl electrode"`.
physical_dimension : str, default: `""`
        The physical dimension, e.g., `"uV"` or `"degreeC"`.
physical_range : tuple[float, float] | None, default: None
The physical range given as a tuple of `(physical_min, physical_max)`. If
`None`, this is determined from the data.
digital_range : tuple[int, int], default: `(-32768, 32767)`
The digital range given as a tuple of `(digital_min, digital_max)`. Uses the
maximum resolution of 16-bit integers by default.
prefiltering : str, default: `""`
The signal prefiltering, e.g., `"HP:0.1Hz LP:75Hz"`.
"""
_label = RawHeaderFieldStr(16, is_settable=True)
transducer_type = RawHeaderFieldStr(80, is_settable=True)
"""Transducer type, e.g., `"AgAgCl electrode"`."""
physical_dimension = RawHeaderFieldStr(8, is_settable=True)
"""Physical dimension, e.g., `"uV"` or `"degreeC"`."""
physical_min = RawHeaderFieldFloat(8)
"""Physical minimum, e.g., `-500` or `34`."""
physical_max = RawHeaderFieldFloat(8)
"""Physical maximum, e.g., `500` or `40`."""
digital_min = RawHeaderFieldInt(8)
"""Digital minimum, e.g., `-2048`."""
digital_max = RawHeaderFieldInt(8)
"""Digital maximum, e.g., `2047`."""
prefiltering = RawHeaderFieldStr(80, is_settable=True)
"""Signal prefiltering, e.g., `"HP:0.1Hz LP:75Hz"`."""
samples_per_data_record = RawHeaderFieldInt(8)
"""
Number of samples in each data record.
For newly instantiated :class:`EdfSignal` objects, this is only set once
:meth:`Edf.write` is called.
"""
reserved = RawHeaderFieldStr(32)
"""Reserved signal header field, always `""`"""
def __init__(
self,
data: npt.NDArray[np.float64],
sampling_frequency: float,
*,
label: str = "",
transducer_type: str = "",
physical_dimension: str = "",
physical_range: tuple[float, float] | None = None,
digital_range: tuple[int, int] = (-32768, 32767),
prefiltering: str = "",
):
self._sampling_frequency = sampling_frequency
self.label = label
self.transducer_type = transducer_type
self.physical_dimension = physical_dimension
self.prefiltering = prefiltering
self._reserved = EdfSignal.reserved.encode("")
if not np.all(np.isfinite(data)):
raise ValueError("Signal data must contain only finite values")
self._set_physical_range(physical_range, data)
self._set_digital_range(digital_range)
self._set_data(data)
def __repr__(self) -> str:
info = f"{self.sampling_frequency:g}Hz"
if self.label:
info = f"{self.label} " + info
return f"<EdfSignal {info}>"
@classmethod
def _from_raw_header(
cls,
sampling_frequency: float,
*,
_label: bytes,
transducer_type: bytes,
physical_dimension: bytes,
physical_min: bytes,
physical_max: bytes,
digital_min: bytes,
digital_max: bytes,
prefiltering: bytes,
samples_per_data_record: bytes,
reserved: bytes,
) -> EdfSignal:
sig = object.__new__(cls)
sig._sampling_frequency = sampling_frequency
sig._label = EdfSignal._label.decode(_label) # type: ignore[attr-defined]
sig._transducer_type = transducer_type # type: ignore[attr-defined]
sig._physical_dimension = physical_dimension # type: ignore[attr-defined]
sig._physical_min = physical_min # type: ignore[attr-defined]
sig._physical_max = physical_max # type: ignore[attr-defined]
sig._digital_min = digital_min # type: ignore[attr-defined]
sig._digital_max = digital_max # type: ignore[attr-defined]
sig._prefiltering = prefiltering # type: ignore[attr-defined]
sig._samples_per_data_record = samples_per_data_record # type: ignore[attr-defined]
sig._reserved = reserved # type: ignore[attr-defined]
return sig
@classmethod
def from_hypnogram(
cls,
stages: npt.NDArray[np.float64],
stage_duration: float = 30,
*,
label: str = "",
) -> EdfSignal:
"""Create an EDF signal from a hypnogram, with scaling according to EDF specs.
According to the EDF FAQ [1]_, use integer numbers 0, 1, 2, 3, 4, 5, 6, and 9
        for sleep stages W, 1, 2, 3, 4, R, MT, and unscored, respectively. The digital
range is set to `(0, 9)`.
Parameters
----------
stages : npt.NDArray[np.float64]
The sleep stages, coded as integer numbers.
stage_duration : float, default: `30`
The duration of each sleep stage in seconds, used to set the sampling
frequency to its inverse.
label : str, default: `""`
The signal's label.
Returns
-------
EdfSignal
The resulting :class:`EdfSignal` object.
References
----------
.. [1] EDF FAQ, https://www.edfplus.info/specs/edffaq.html
"""
allowed_stages = {0, 1, 2, 3, 4, 5, 6, 9}
if invalid_stages := set(stages) - allowed_stages:
raise ValueError(f"stages contains invalid values: {invalid_stages}")
return EdfSignal(
data=stages,
sampling_frequency=1 / stage_duration,
label=label,
physical_range=(0, 9),
digital_range=(0, 9),
)
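    # Illustrative usage sketch (editor's addition, not part of the original source): building a
    # hypnogram signal from made-up 30-second sleep stages with the classmethod above.
    #
    # >>> import numpy as np
    # >>> stages = np.array([0, 0, 1, 2, 3, 2, 5, 0], dtype=np.float64)
    # >>> hypnogram = EdfSignal.from_hypnogram(stages, stage_duration=30, label="Hypnogram")
    # >>> hypnogram.sampling_frequency  # the inverse of the stage duration
    # 0.03333333333333333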
@property
def label(self) -> str:
"""Signal label, e.g., `"EEG Fpz-Cz"` or `"Body temp"`."""
return self._label
@label.setter
def label(self, label: str) -> None:
if label == "EDF Annotations":
raise ValueError("Ordinary signal label must not be 'EDF Annotations'.")
self._label = label
@property
def physical_range(self) -> FloatRange:
"""The physical range as a tuple of `(physical_min, physical_max)`."""
return FloatRange(self.physical_min, self.physical_max)
@property
def digital_range(self) -> IntRange:
"""The digital range as a tuple of `(digital_min, digital_max)`."""
return IntRange(self.digital_min, self.digital_max)
@property
def sampling_frequency(self) -> float:
"""The sampling frequency in Hz."""
return self._sampling_frequency
@property
def data(self) -> npt.NDArray[np.float64]:
"""
Numpy array containing the physical signal values as floats.
To simplify avoiding inconsistencies between signal data and header fields,
individual values in the returned array can not be modified. Use
:meth:`EdfSignal.update_data` to overwrite with new physical data.
"""
try:
gain, offset = calculate_gain_and_offset(
self.digital_min,
self.digital_max,
self.physical_min,
self.physical_max,
)
except ZeroDivisionError:
data = self._digital.astype(np.float64)
warnings.warn(
f"Digital minimum equals digital maximum ({self.digital_min}) for {self.label}, returning uncalibrated signal."
)
except ValueError:
data = self._digital.astype(np.float64)
else:
data = (self._digital + offset) * gain
data.setflags(write=False)
return data
def update_data(
self,
data: npt.NDArray[np.float64],
*,
keep_physical_range: bool = False,
sampling_frequency: float | None = None,
) -> None:
"""
Overwrite physical signal values with an array of equal length.
Parameters
----------
data : npt.NDArray[np.float64]
The new physical data.
keep_physical_range : bool, default: False
            If `True`, the `physical_range` is not modified to accommodate the new data.
sampling_frequency : float | None, default: None
If not `None`, the `sampling_frequency` is updated to the new value. The new
data must match the expected length for the new sampling frequency.
"""
expected_length = len(self._digital)
if (
sampling_frequency is not None
and sampling_frequency != self._sampling_frequency
):
expected_length = self._get_expected_new_length(sampling_frequency)
if len(data) != expected_length:
raise ValueError(
f"Signal lengths must match: got {len(data)}, expected {len(self._digital)}."
)
physical_range = self.physical_range if keep_physical_range else None
self._set_physical_range(physical_range, data)
if sampling_frequency is not None:
self._sampling_frequency = sampling_frequency
self._set_data(data)
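    # Illustrative usage sketch (editor's addition): overwriting the samples of an existing
    # signal. `signal` is assumed to be an EdfSignal with 100 samples at 10 Hz whose physical
    # range contains 0; the names and numbers are hypothetical.
    #
    # >>> signal.update_data(np.zeros(100), keep_physical_range=True)
    # >>> signal.update_data(np.zeros(200), sampling_frequency=20)  # length must match the new rate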
def _get_expected_new_length(self, sampling_frequency: float) -> int:
if sampling_frequency <= 0:
raise ValueError(
f"Sampling frequency must be positive, got {sampling_frequency}"
)
current_length = len(self._digital)
expected_length_f = (
sampling_frequency / self._sampling_frequency * current_length
)
if not math.isclose(expected_length_f, round(expected_length_f), rel_tol=1e-10):
raise ValueError(
f"Sampling frequency of {sampling_frequency} results in non-integer number of samples ({expected_length_f})"
)
return round(expected_length_f)
def _set_digital_range(self, digital_range: tuple[int, int]) -> None:
digital_range = IntRange(*digital_range)
if digital_range.min == digital_range.max:
raise ValueError(
f"Digital minimum ({digital_range.min}) must differ from digital maximum ({digital_range.max})."
)
self._digital_min = EdfSignal.digital_min.encode(digital_range.min)
self._digital_max = EdfSignal.digital_max.encode(digital_range.max)
def _set_physical_range(
self,
physical_range: tuple[float, float] | None,
data: npt.NDArray[np.float64],
) -> None:
if physical_range is None:
physical_range = FloatRange(data.min(), data.max())
if physical_range.min == physical_range.max:
physical_range = FloatRange(physical_range.min, physical_range.max + 1)
else:
physical_range = FloatRange(*physical_range)
if physical_range.min == physical_range.max:
raise ValueError(
f"Physical minimum ({physical_range.min}) must differ from physical maximum ({physical_range.max})."
)
data_min = data.min()
data_max = data.max()
if data_min < physical_range.min or data_max > physical_range.max:
raise ValueError(
f"Signal range [{data_min}, {data_max}] out of physical range: [{physical_range.min}, {physical_range.max}]"
)
self._physical_min = EdfSignal.physical_min.encode(
round_float_to_8_characters(physical_range.min, math.floor)
)
self._physical_max = EdfSignal.physical_max.encode(
round_float_to_8_characters(physical_range.max, math.ceil)
)
def _set_data(self, data: npt.NDArray[np.float64]) -> None:
gain, offset = calculate_gain_and_offset(
self.digital_min,
self.digital_max,
self.physical_min,
self.physical_max,
)
self._digital = np.round(data / gain - offset).astype(np.int16)
class Patient:
"""
Object representation of the local patient identification.
Parsing from/to the string containing the local_patient_identification header field
is done according to the EDF+ specs. Subfields must be ASCII (32..126) and may not
contain spaces.
Parameters
----------
code : str, default: `"X"`
The code by which the patient is known in the hospital administration.
sex : `{"X", "F", "M"}`, default: `"X"`
Sex, `F` for female, `M` for male, `X` if anonymized.
birthdate : datetime.date | None, default: None
Patient birthdate, stored as `X` if `None`.
name : str, default: `"X"`
        The patient's name, stored as `"X"` if not specified.
additional : Sequence[str], default: `()`
Optional additional subfields. Will be stored in the header field separated by
spaces.
"""
def __init__(
self,
*,
code: str = "X",
sex: Literal["F", "M", "X"] = "X",
birthdate: datetime.date | None = None,
name: str = "X",
additional: Sequence[str] = (),
) -> None:
if sex not in ("F", "M", "X"):
raise ValueError(f"Invalid sex: {sex}, must be one of F, M, X")
if birthdate is None:
birthdate_field = "X"
else:
birthdate_field = encode_edfplus_date(birthdate)
subfields = {
"code": code,
"sex": sex,
"birthdate": birthdate_field,
"name": name,
**{f"additional[{i}]": v for i, v in enumerate(additional)},
}
validate_subfields(subfields)
local_patient_identification = " ".join(subfields.values())
encode_str(local_patient_identification, 80)
self._local_patient_identification = local_patient_identification
def __repr__(self) -> str:
try:
return repr_from_init(self)
except Exception:
return repr(self._local_patient_identification)
@classmethod
def _from_str(cls, string: str) -> Patient:
encode_str(string, 80)
obj = object.__new__(cls)
obj._local_patient_identification = string
return obj
def _to_str(self) -> str:
return self._local_patient_identification
@property
def code(self) -> str:
"""The code by which the patient is known in the hospital administration."""
return self.get_subfield(0)
@property
def sex(self) -> str:
"""Sex, `F` for female, `M` for male, `X` if anonymized."""
return self.get_subfield(1)
@property
def birthdate(self) -> datetime.date:
"""Patient birthdate."""
birthdate_field = self.get_subfield(2)
if birthdate_field == "X":
raise AnonymizedDateError("Patient birthdate is not available ('X').")
return decode_edfplus_date(birthdate_field)
@property
def name(self) -> str:
"""The patient's name."""
return self.get_subfield(3)
@property
def additional(self) -> tuple[str, ...]:
"""Optional additional subfields."""
return tuple(self._local_patient_identification.split()[4:])
def get_subfield(self, idx: int) -> str:
"""
Access a subfield of the local patient identification field by index.
Parameters
----------
idx : int
The index of the subfield to access.
Returns
-------
str
            The subfield at the specified index. If the index exceeds the number of
            available subfields, the return value is `"X"`.
"""
subfields = self._local_patient_identification.split()
if len(subfields) <= idx:
return "X"
return subfields[idx]
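# Illustrative usage sketch (editor's addition, values made up): constructing a local patient
# identification and reading back one of its parsed subfields.
#
# >>> import datetime
# >>> patient = Patient(
# ...     code="MCH-0234567", sex="F", birthdate=datetime.date(1951, 5, 2), name="Haagse_Harry"
# ... )
# >>> patient.birthdate
# datetime.date(1951, 5, 2)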
class Recording:
"""
Object representation of the local recording identification.
Parsing from/to the string containing the local_recording_identification header
field is done according to the EDF+ specs. Subfields must be ASCII (32..126) and may
not contain spaces.
Parameters
----------
startdate : datetime.date | None, default: None
The recording startdate.
hospital_administration_code : str, default: `"X"`
The hospital administration code of the investigation, e.g., EEG number or PSG
number.
investigator_technician_code : str, default: `"X"`
A code specifying the responsible investigator or technician.
equipment_code : str, default: `"X"`
A code specifying the used equipment.
additional : Sequence[str], default: `()`
Optional additional subfields. Will be stored in the header field separated by
spaces.
"""
def __init__(
self,
*,
startdate: datetime.date | None = None,
hospital_administration_code: str = "X",
investigator_technician_code: str = "X",
equipment_code: str = "X",
additional: Sequence[str] = (),
) -> None:
if startdate is None:
startdate_field = "X"
else:
startdate_field = encode_edfplus_date(startdate)
subfields = {
"startdate": startdate_field,
"hospital_administration_code": hospital_administration_code,
"investigator_technician_code": investigator_technician_code,
"equipment_code": equipment_code,
**{f"additional[{i}]": v for i, v in enumerate(additional)},
}
validate_subfields(subfields)
local_recording_identification = " ".join(("Startdate", *subfields.values()))
encode_str(local_recording_identification, 80)
self._local_recording_identification = local_recording_identification
def __repr__(self) -> str:
try:
return repr_from_init(self)
except Exception:
return repr(self._local_recording_identification)
@classmethod
def _from_str(cls, string: str) -> Recording:
encode_str(string, 80)
obj = object.__new__(cls)
obj._local_recording_identification = string
return obj
def _to_str(self) -> str:
return self._local_recording_identification
@property
def startdate(self) -> datetime.date:
"""The recording startdate."""
if not self._local_recording_identification.startswith("Startdate "):
raise ValueError(
f"Local recording identification field {self._local_recording_identification!r} does not follow EDF+ standard."
)
startdate_field = self.get_subfield(1)
if startdate_field == "X":
raise AnonymizedDateError("Recording startdate is not available ('X').")
return decode_edfplus_date(startdate_field)
@property
def hospital_administration_code(self) -> str:
"""The hospital administration code of the investigation."""
return self.get_subfield(2)
@property
def investigator_technician_code(self) -> str:
"""A code specifying the responsible investigator or technician."""
return self.get_subfield(3)
@property
def equipment_code(self) -> str:
"""A code specifying the used equipment."""
return self.get_subfield(4)
@property
def additional(self) -> tuple[str, ...]:
"""Optional additional subfields."""
return tuple(self._local_recording_identification.split()[5:])
def get_subfield(self, idx: int) -> str:
"""
Access a subfield of the local recording identification field by index.
Parameters
----------
idx : int
The index of the subfield to access. The first subfield (starting at
            index 0) should always be "Startdate" according to the EDF+ specification.
Returns
-------
str
            The subfield at the specified index. If the index exceeds the number of
            available subfields, the return value is `"X"`.
"""
subfields = self._local_recording_identification.split()
if len(subfields) <= idx:
return "X"
return subfields[idx]
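# Illustrative usage sketch (editor's addition, values made up): a local recording
# identification with an explicit startdate and equipment code.
#
# >>> recording = Recording(
# ...     startdate=datetime.date(2002, 8, 2),
# ...     hospital_administration_code="EMG561",
# ...     investigator_technician_code="BK/JOP",
# ...     equipment_code="Sony.",
# ... )
# >>> recording.equipment_code
# 'Sony.'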
class Edf:
"""Python representation of an EDF file.
EDF header fields are exposed as properties with appropriate data types (i.e.,
string, numeric, date, or time objects). Fields that might break the file on
modification (i.e., `version`, `bytes_in_header_record`, `reserved`,
`num_data_records`, `data_record_duration`, and `num_signals`) can not be set after
instantiation.
Note that the startdate has to be set via the parameter `recording`.
For writing an EDF file with a non-integer seconds duration, currently an
appropriate value for `data_record_duration` has to be provided manually.
Parameters
----------
signals : Sequence[EdfSignal]
The (non-annotation) signals to be contained in the EDF file.
patient : Patient | None, default: None
The "local patient identification", containing patient code, sex, birthdate,
name, and optional additional fields. If `None`, the field is set to `X X X X`
in accordance with EDF+ specs.
recording : Recording | None, default: None
The "local recording identification", containing recording startdate, hospital
administration code, investigator/technical code, equipment code, and optional
additional fields. If `None`, the field is set to `Startdate X X X X` in
accordance with EDF+ specs.
starttime : datetime.time | None, default: None
The starttime of the recording. If `None`, `00.00.00` is used. If `starttime`
contains microseconds, an EDF+C file is created.
data_record_duration : float | None, default: None
The duration of each data record in seconds. If `None`, an appropriate value is
chosen automatically.
annotations : Iterable[EdfAnnotation] | None, default: None
The annotations, consisting of onset, duration (optional), and text. If not
`None`, an EDF+C file is created.
"""
version = RawHeaderFieldInt(8)
"""EDF version, always `0`"""
local_patient_identification = RawHeaderFieldStr(80, is_settable=True)
"""
Unparsed string representation of the legacy local patient identification.
See also
--------
patient: Parsed representation, as a :class:`Patient` object.
"""
local_recording_identification = RawHeaderFieldStr(80, is_settable=True)
"""
Unparsed string representation of the legacy local recording identification.
See also
--------
recording: Parsed representation, as a :class:`Recording` object.
"""
_startdate = RawHeaderFieldDate(8, is_settable=True)
_starttime = RawHeaderFieldTime(8, is_settable=True)
bytes_in_header_record = RawHeaderFieldInt(8)
"""Number of bytes in the header record."""
reserved = RawHeaderFieldStr(44)
"""`"EDF+C"` for an EDF+C file, else `""`."""
num_data_records = RawHeaderFieldInt(8)
"""Number of data records in the recording."""
_data_record_duration = RawHeaderFieldFloat(8, is_settable=True)
_num_signals = RawHeaderFieldInt(4, is_settable=True)
def __init__(
self,
signals: Sequence[EdfSignal],
*,
patient: Patient | None = None,
recording: Recording | None = None,
starttime: datetime.time | None = None,
data_record_duration: float | None = None,
annotations: Iterable[EdfAnnotation] | None = None,
):
if not signals and not annotations:
raise ValueError("Edf must contain either signals or annotations")
if patient is None:
patient = Patient()
if recording is None:
recording = Recording()
if starttime is None:
starttime = datetime.time(0, 0, 0)
if data_record_duration is None:
data_record_duration = _calculate_data_record_duration(signals)
elif len(signals) == 0 and data_record_duration != 0:
raise ValueError(
"Data record duration must be zero for annotation-only files"
)
self._data_record_duration = data_record_duration
self._set_num_data_records_with_signals(signals)
self._version = Edf.version.encode(0)
self.local_patient_identification = patient._to_str()
self.local_recording_identification = recording._to_str()
self._set_startdate_with_recording(recording)
self._starttime = starttime.replace(microsecond=0)
self._reserved = Edf.reserved.encode("")
if starttime.microsecond and annotations is None:
warnings.warn("Creating EDF+C to store microsecond starttime.")
if annotations is not None or starttime.microsecond:
signals = (
*signals,
_create_annotations_signal(
annotations if annotations is not None else (),
num_data_records=self.num_data_records,
data_record_duration=self.data_record_duration,
subsecond_offset=starttime.microsecond / 1_000_000,
),
)
self._reserved = Edf.reserved.encode("EDF+C")
self._set_signals(signals)
def __repr__(self) -> str:
signals_text = f"{len(self.signals)} signal"
if len(self.signals) != 1:
signals_text += "s"
annotations_text = f"{len(self.annotations)} annotation"
if len(self.annotations) != 1:
annotations_text += "s"
return f"<Edf {signals_text} {annotations_text}>"
def _load_data(self, file: Path | io.BufferedReader | io.BytesIO) -> None:
lens = [signal.samples_per_data_record for signal in self._signals]
datarecord_len = sum(lens)
if not isinstance(file, Path):
datarecords = np.frombuffer(file.read(), dtype=np.int16)
else:
datarecords = np.memmap(
file,
dtype=np.int16,
mode="r",
offset=self.bytes_in_header_record,
)
datarecords.shape = (self.num_data_records, datarecord_len)
ends = np.cumsum(lens)
starts = ends - lens
for signal, start, end in zip(self._signals, starts, ends):
signal._digital = datarecords[:, start:end].flatten()
def _read_header(self, buffer: io.BufferedReader | io.BytesIO) -> None:
for header_name, length in get_header_fields(Edf):
setattr(self, "_" + header_name, buffer.read(length))
self._signals = self._parse_signal_headers(buffer.read(256 * self._num_signals))
@property
def signals(self) -> tuple[EdfSignal, ...]:
"""
Ordinary signals contained in the recording.
Annotation signals are excluded. Individual signals can not be removed, added,
or replaced by modifying this property. Use :meth:`Edf.append_signals`,
:meth:`Edf.drop_signals`, or :attr:`EdfSignal.data`, respectively.
"""
return tuple(s for s in self._signals if s.label != "EDF Annotations")
def _set_signals(self, signals: Sequence[EdfSignal]) -> None:
signals = tuple(signals)
self._set_num_data_records_with_signals(signals)
self._signals = signals
self._bytes_in_header_record = Edf.bytes_in_header_record.encode(
256 * (len(signals) + 1)
)
self._num_signals = len(signals)
if all(s.label == "EDF Annotations" for s in signals):
self._data_record_duration = 0
def _set_num_data_records_with_signals(
self,
signals: Sequence[EdfSignal],
) -> None:
if not signals:
num_data_records = 1
else:
signal_durations = [
round(len(s._digital) / s.sampling_frequency, 12) for s in signals
]
if any(v != signal_durations[0] for v in signal_durations[1:]):
raise ValueError(
f"Inconsistent signal durations (in seconds): {signal_durations}"
)
num_data_records = _calculate_num_data_records(
signal_durations[0],
self.data_record_duration,
)
signal_lengths = [len(s._digital) for s in signals]
if any(l % num_data_records for l in signal_lengths):
raise ValueError(
f"Not all signal lengths can be split into {num_data_records} data records: {signal_lengths}"
)
self._num_data_records = Edf.num_data_records.encode(num_data_records)
def _parse_signal_headers(self, raw_signal_headers: bytes) -> tuple[EdfSignal, ...]:
raw_headers_split: dict[str, list[bytes]] = {}
start = 0
for header_name, length in get_header_fields(EdfSignal):
end = start + length * self._num_signals
raw_header = raw_signal_headers[start:end]
raw_headers_split[header_name] = [
raw_header[i : length + i] for i in range(0, len(raw_header), length)
]
start = end
signals = []
for i in range(self._num_signals):
raw_signal_header = {
key: raw_headers_split[key][i] for key in raw_headers_split
}
try:
sampling_frequency = (
int(raw_signal_header["samples_per_data_record"])
/ self.data_record_duration
)
except ZeroDivisionError:
if raw_signal_header["_label"].rstrip() == b"EDF Annotations":
sampling_frequency = 0
signals.append(
EdfSignal._from_raw_header(sampling_frequency, **raw_signal_header)
)
return tuple(signals)
def write(self, target: Path | str | io.BufferedWriter | io.BytesIO) -> None:
"""
Write an Edf to a file or file-like object.
Parameters
----------
target : Path | str | io.BufferedWriter | io.BytesIO
The file location (path object or string) or file-like object to write to.
"""
if self.num_data_records == -1:
warnings.warn("num_data_records=-1, determining correct value from data")
num_data_records = _calculate_num_data_records(
len(self._signals[0]._digital) * self._signals[0].sampling_frequency,
self.data_record_duration,
)
else:
num_data_records = self.num_data_records
for signal in self._signals:
signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]
len(signal._digital) // num_data_records
)
header_records = []
for header_name, _ in get_header_fields(Edf):
header_records.append(getattr(self, "_" + header_name))
for header_name, _ in get_header_fields(EdfSignal):
for signal in self._signals:
header_records.append(getattr(signal, "_" + header_name))
header_record = b"".join(header_records)
lens = [signal.samples_per_data_record for signal in self._signals]
ends = np.cumsum(lens)
starts = ends - lens
data_record = np.empty((num_data_records, sum(lens)), dtype=np.int16)
for signal, start, end in zip(self._signals, starts, ends):
data_record[:, start:end] = signal._digital.reshape((-1, end - start))
if isinstance(target, str):
target = Path(target)
if isinstance(target, io.BufferedWriter):
target.write(header_record)
data_record.tofile(target)
elif isinstance(target, io.BytesIO):
target.write(header_record)
target.write(data_record.tobytes())
else:
with target.expanduser().open("wb") as file:
file.write(header_record)
data_record.tofile(file)
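    # Illustrative usage sketch (editor's addition): assembling a small EDF+ recording and
    # writing it to disk. Signal content, labels, and the file name are made up.
    #
    # >>> import numpy as np
    # >>> eeg = EdfSignal(np.random.randn(30 * 256), sampling_frequency=256, label="EEG Fpz-Cz")
    # >>> temp = EdfSignal(np.full(30, 36.8), sampling_frequency=1, label="Body temp",
    # ...                  physical_range=(35.0, 40.0))
    # >>> edf = Edf([eeg, temp], annotations=[EdfAnnotation(3.5, 2.0, "Arousal")])
    # >>> edf.write("example.edf")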
@property
def labels(self) -> tuple[str, ...]:
"""
The labels of all signals contained in the Edf.
Returns
-------
tuple[str, ...]
The labels, in order of the signals.
"""
return tuple(s.label for s in self.signals)
def get_signal(self, label: str) -> EdfSignal:
"""
Retrieve a single signal by its label.
The label has to be unique - a ValueError is raised if it is ambiguous or does
not exist.
Parameters
----------
label : str
A label identifying a single signal
Returns
-------
EdfSignal
The signal corresponding to the given label.
"""
count = self.labels.count(label)
if count == 0:
raise ValueError(
f"No signal with label {label!r}, possible options: {self.labels}"
)
if count > 1:
indices = [i for i, l in enumerate(self.labels) if l == label]
raise ValueError(f"Ambiguous label {label!r} identifies indices {indices}")
return self.signals[self.labels.index(label)]
@property
def patient(self) -> Patient:
"""
Parsed object representation of the local patient identification.
See :class:`Patient` for information on its attributes.
"""
return Patient._from_str(self.local_patient_identification)
@patient.setter
def patient(self, patient: Patient) -> None:
self.local_patient_identification = patient._to_str()
@property
def recording(self) -> Recording:
"""
Parsed object representation of the local recording identification.
See :class:`Recording` for information on its attributes.
"""
return Recording._from_str(self.local_recording_identification)
@recording.setter
def recording(self, recording: Recording) -> None:
self._set_startdate_with_recording(recording)
self.local_recording_identification = recording._to_str()
@property
def startdate(self) -> datetime.date:
"""
Recording startdate.
If the :attr:`local_recording_identification` conforms to the EDF+ standard, the
startdate provided there is used. If not, this falls back to the legacy
:attr:`startdate` field. If both differ, a warning is issued and the EDF+ field
is preferred. Raises an `AnonymizedDateError` if the EDF+ field is anonymized
(i.e., begins with `Startdate X`).
"""
with contextlib.suppress(Exception):
if self._startdate != self.recording.startdate:
warnings.warn(
f"Different values in startdate fields: {self._startdate}, {self.recording.startdate}"
)
try:
return self.recording.startdate
except AnonymizedDateError:
raise
except ValueError:
return self._startdate
@startdate.setter
def startdate(self, startdate: datetime.date) -> None:
self._startdate = startdate
try:
self.recording.startdate # noqa: B018
except AnonymizedDateError:
pass
except Exception:
return
recording_subfields = self.local_recording_identification.split()
recording_subfields[1] = encode_edfplus_date(startdate)
self.local_recording_identification = " ".join(recording_subfields)
@property
def _subsecond_offset(self) -> float:
try:
timekeeping_raw = self._timekeeping_signal._digital.tobytes()
first_data_record = timekeeping_raw[: timekeeping_raw.find(b"\x00") + 1]
return _EdfAnnotationsDataRecord.from_bytes(first_data_record).tals[0].onset
except StopIteration:
return 0
@property
def starttime(self) -> datetime.time:
"""
Recording starttime.
In EDF+ files, microsecond accuracy is supported.
"""
subsecond_offset = self._subsecond_offset
try:
return self._starttime.replace(
microsecond=round(subsecond_offset * 1000000)
)
except ValueError as e:
raise ValueError(
f"Subsecond offset in first annotation must be 0.X, is {subsecond_offset}"
) from e
@starttime.setter
def starttime(self, starttime: datetime.time) -> None:
onset_change = starttime.microsecond / 1000000 - self._subsecond_offset
self._starttime = starttime.replace(microsecond=0)
if starttime.microsecond != self.starttime.microsecond:
timekeeping_signal = self._timekeeping_signal
data_records = []
for data_record in timekeeping_signal._digital.reshape(
(-1, timekeeping_signal.samples_per_data_record)
):
annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
for tal in annot_dr.tals:
tal.onset = round(tal.onset + onset_change, 12)
data_records.append(annot_dr.to_bytes())
maxlen = max(len(data_record) for data_record in data_records)
if maxlen % 2:
maxlen += 1
raw = b"".join(dr.ljust(maxlen, b"\x00") for dr in data_records)
timekeeping_signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]
maxlen // 2
)
timekeeping_signal._sampling_frequency = (
                maxlen // 2 / self.data_record_duration
)
timekeeping_signal._digital = np.frombuffer(raw, dtype=np.int16)
def _set_startdate_with_recording(self, recording: Recording) -> None:
try:
self._startdate = recording.startdate
except AnonymizedDateError:
self._startdate = datetime.date(1985, 1, 1)
@property
def data_record_duration(self) -> float:
"""Duration of each data record in seconds."""
return self._data_record_duration
def update_data_record_duration(
self,
data_record_duration: float,
method: Literal["strict", "pad", "truncate"] = "strict",
) -> None:
"""
Update the data record duration.
This operation will fail if the new duration is incompatible with the current
sampling frequencies.
Parameters
----------
data_record_duration : float
The new data record duration in seconds.
method : `{"strict", "pad", "truncate"}`, default: `"strict"`
How to handle the case where the new duration does not divide the Edf
duration evenly
- "strict": Raise a ValueError
- "pad": Pad the data with zeros to the next compatible duration. If zero
is outside the physical range, data is padded with the physical minimum.
- "truncate": Truncate the data to the previous compatible duration (might
lead to loss of data)
"""
if data_record_duration == self.data_record_duration:
return
if data_record_duration <= 0:
raise ValueError(
f"Data record duration must be positive, got {data_record_duration}"
)
if not self.signals:
raise ValueError(
"Data record duration must be zero for annotation-only files"
)
for signal in self.signals:
spr = signal.sampling_frequency * data_record_duration
if spr % 1:
raise ValueError(
f"Cannot set data record duration to {data_record_duration}: Incompatible sampling frequency {signal.sampling_frequency} Hz"
)
num_data_records = self._pad_or_truncate_signals(data_record_duration, method)
self._update_record_duration_in_annotation_signals(
data_record_duration, num_data_records
)
self._data_record_duration = data_record_duration
self._num_data_records = Edf.num_data_records.encode(num_data_records)
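    # Illustrative usage sketch (editor's addition): switching a recording like the one sketched
    # above to 10-second data records, padding the signals if the total duration does not divide
    # evenly. All sampling frequencies are assumed to be compatible with the new duration.
    #
    # >>> edf.update_data_record_duration(10, method="pad")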
@property
def num_signals(self) -> int:
"""Return the number of signals, excluding annotation signals for EDF+."""
return len(self.signals)
def _pad_or_truncate_signals(
self, data_record_duration: float, method: Literal["strict", "pad", "truncate"]
) -> int:
if method == "pad":
new_duration = (
ceil(self.duration / data_record_duration) * data_record_duration
)
self._pad_or_truncate_data(new_duration)
return round(new_duration / data_record_duration)
if method == "truncate":
new_duration = (
floor(self.duration / data_record_duration) * data_record_duration
)
self._pad_or_truncate_data(new_duration)
return round(new_duration / data_record_duration)
return _calculate_num_data_records(self.duration, data_record_duration)
def _update_record_duration_in_annotation_signals(
self, data_record_duration: float, num_data_records: int
) -> None:
signals = list(self._signals)
for idx, signal in enumerate(self._signals):
if signal not in self._annotation_signals:
continue
annotations = []
for data_record in signal._digital.reshape(
(-1, signal.samples_per_data_record)
):
annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
if signal is self._timekeeping_signal:
annotations.extend(annot_dr.annotations[1:])
else:
annotations.extend(annot_dr.annotations)
signals[idx] = _create_annotations_signal(
[
EdfAnnotation(a.onset - self._subsecond_offset, a.duration, a.text)
for a in annotations
],
num_data_records=num_data_records,
data_record_duration=data_record_duration,
with_timestamps=signal is self._timekeeping_signal,
subsecond_offset=self._subsecond_offset,
)
self._signals = tuple(signals)
def _pad_or_truncate_data(self, new_duration: float) -> None:
for signal in self.signals:
n_samples = round(new_duration * signal.sampling_frequency)
diff = n_samples - len(signal._digital)
if diff > 0:
physical_pad_value = 0.0
if signal.physical_min > 0 or signal.physical_max < 0:
physical_pad_value = signal.physical_min
signal._set_data(
np.pad(signal.data, (0, diff), constant_values=physical_pad_value)
)
elif diff < 0:
signal._set_data(signal.data[:diff])
def anonymize(self) -> None:
"""
Anonymize a recording.
Header fields are modified as follows:
- local patient identification is set to `X X X X`
- local recording identification is set to `Startdate X X X X`
- startdate is set to `01.01.85`
- starttime is set to `00.00.00`
For EDF+ files, subsecond starttimes specified via an annotations signal are
removed.
"""
self.patient = Patient()
self.recording = Recording()
self.starttime = datetime.time(0, 0, 0)
def drop_signals(self, drop: Iterable[int | str]) -> None:
"""
Drop signals by index or label.
Signal indices (int) and labels (str) can be provided in the same iterable. For
ambiguous labels, all corresponding signals are dropped. Raises a ValueError if
at least one of the provided identifiers does not correspond to a signal.
Parameters
----------
drop : Iterable[int | str]
The signals to drop, identified by index or label.
"""
if isinstance(drop, str):
drop = [drop]
selected: list[EdfSignal] = []
dropped: list[int | str] = []
i = 0
for signal in self._signals:
if signal.label == "EDF Annotations":
selected.append(signal)
continue
if i in drop or signal.label in drop:
dropped.append(i)
dropped.append(signal.label)
else:
selected.append(signal)
i += 1
if not_dropped := set(drop) - set(dropped):
raise ValueError(f"No signal found with index/label {not_dropped}")
self._signals = tuple(selected)
self._bytes_in_header_record = Edf.bytes_in_header_record.encode(
256 * (len(selected) + 1)
)
self._num_signals = len(selected)
def append_signals(self, new_signals: EdfSignal | Iterable[EdfSignal]) -> None:
"""
Append one or more signal(s) to the Edf recording.
Every signal must be compatible with the current `data_record_duration` and all
signal durations must match the overall recording duration. For recordings
containing EDF+ annotation signals, the new signals are inserted after the last
ordinary (i.e. non-annotation) signal.
Parameters
----------
new_signals : EdfSignal | Iterable[EdfSignal]
The signal(s) to add.
"""
if isinstance(new_signals, EdfSignal):
new_signals = [new_signals]
last_ordinary_index = 0
for i, signal in enumerate(self._signals):
if signal.label != "EDF Annotations":
last_ordinary_index = i
self._set_signals(
[
*self._signals[: last_ordinary_index + 1],
*new_signals,
*self._signals[last_ordinary_index + 1 :],
]
)
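    # Illustrative usage sketch (editor's addition): dropping a channel by label and appending a
    # newly constructed one of matching duration; `edf` is assumed to be a 30 s recording with a
    # "Body temp" channel, as in the hypothetical example further above.
    #
    # >>> edf.drop_signals(["Body temp"])
    # >>> edf.append_signals(EdfSignal(np.zeros(30), sampling_frequency=1, label="Marker"))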
@property
def _annotation_signals(self) -> Iterable[EdfSignal]:
return (signal for signal in self._signals if signal.label == "EDF Annotations")
@property
def _timekeeping_signal(self) -> EdfSignal:
return next(iter(self._annotation_signals))
@property
def duration(self) -> float:
"""Recording duration in seconds."""
return self.num_data_records * self.data_record_duration
@property
def annotations(self) -> tuple[EdfAnnotation, ...]:
"""
All annotations contained in the Edf, sorted chronologically.
Does not include timekeeping annotations.
"""
annotations: list[EdfAnnotation] = []
for i, signal in enumerate(self._annotation_signals):
for data_record in signal._digital.reshape(
(-1, signal.samples_per_data_record)
):
annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
if i == 0:
# from https://www.edfplus.info/specs/edfplus.html#timekeeping:
# The first annotation of the first 'EDF Annotations' signal in each
# data record is empty, but its timestamp specifies how many seconds
# after the file startdate/time that data record starts.
annotations.extend(annot_dr.annotations[1:])
else:
annotations.extend(annot_dr.annotations)
subsecond_offset = self._subsecond_offset
annotations = [
EdfAnnotation(
round(ann.onset - subsecond_offset, 12), ann.duration, ann.text
)
for ann in annotations
]
return tuple(sorted(annotations))
def drop_annotations(self, text: str) -> None:
"""
Drop annotations with a given text.
Parameters
----------
text : str
All annotations whose text exactly matches this parameter are removed.
"""
for signal in self._annotation_signals:
for data_record in signal._digital.reshape(
(-1, signal.samples_per_data_record)
):
annotations = _EdfAnnotationsDataRecord.from_bytes(
data_record.tobytes()
)
annotations.drop_annotations_with_text(text)
data_record[:] = np.frombuffer(
annotations.to_bytes().ljust(len(data_record) * 2, b"\x00"),
dtype=np.int16,
)
def to_bytes(self) -> bytes:
"""
Convert an Edf to a `bytes` object.
Returns
-------
bytes
The binary representation of the Edf object (i.e., what a file created with
`Edf.write` would contain).
"""
stream = io.BytesIO()
self.write(stream)
stream.seek(0)
return stream.read()
def slice_between_seconds(
self,
start: float,
stop: float,
*,
keep_all_annotations: bool = False,
) -> None:
"""
Slice to the interval between two times.
The sample point corresponding to `stop` is excluded. `start` and `stop` are
given in seconds from recording start and have to correspond exactly to a sample
time in all non-annotation signals.
Parameters
----------
start : float
Start time in seconds from recording start.
stop : float
Stop time in seconds from recording start.
keep_all_annotations : bool, default: False
If set to `True`, annotations outside the selected time interval are kept.
"""
signals: list[EdfSignal] = []
self._verify_seconds_inside_recording_time(start)
self._verify_seconds_inside_recording_time(stop)
self._verify_seconds_coincide_with_sample_time(start)
self._verify_seconds_coincide_with_sample_time(stop)
self._num_data_records = Edf.num_data_records.encode(
int((stop - start) / self.data_record_duration)
)
for signal in self._signals:
if signal.label == "EDF Annotations":
signals.append(
self._slice_annotations_signal(
signal,
start=start,
stop=stop,
keep_all_annotations=keep_all_annotations,
)
)
else:
start_index = start * signal.sampling_frequency
stop_index = stop * signal.sampling_frequency
signal._digital = signal._digital[int(start_index) : int(stop_index)]
signals.append(signal)
self._set_signals(signals)
self._shift_startdatetime(int(start))
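    # Illustrative usage sketch (editor's addition): keeping only the part of the recording
    # between 10 s and 20 s. Both endpoints are assumed to fall on sample times of every
    # ordinary signal, and the data record duration is assumed to divide the 10 s window.
    #
    # >>> edf.slice_between_seconds(10, 20)
    # >>> edf.duration
    # 10.0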
def slice_between_annotations(
self,
start_text: str,
stop_text: str,
*,
keep_all_annotations: bool = False,
) -> None:
"""
Slice to the interval between two EDF+ annotations.
The sample point corresponding to the onset of the annotation identified by
`stop_text` is excluded. `start_text` and `stop_text` each have to uniquely
identify a single annotation, whose onset corresponds exactly to a sample time
in all non-annotation signals.
Parameters
----------
start_text : str
Text identifying the start annotation.
stop_text : str
Text identifying the stop annotation.
keep_all_annotations : bool, default: False
If set to `True`, annotations outside the selected time interval are kept.
"""
self.slice_between_seconds(
self._get_annotation_by_text(start_text).onset,
self._get_annotation_by_text(stop_text).onset,
keep_all_annotations=keep_all_annotations,
)
def _get_annotation_by_text(self, text: str) -> EdfAnnotation:
matches = []
for annotation in self.annotations:
if annotation.text == text:
matches.append(annotation)
if len(matches) == 1:
return matches[0]
if len(matches) > 1:
raise ValueError(
f"Ambiguous annotation text {text!r}, found {len(matches)} matches"
)
raise ValueError(f"No annotation found with text {text!r}")
def _verify_seconds_inside_recording_time(self, seconds: float) -> None:
if not 0 <= seconds <= self.duration:
raise ValueError(
f"{seconds} is an invalid slice time for recording duration {self.duration}"
)
def _verify_seconds_coincide_with_sample_time(self, seconds: float) -> None:
for i, signal in enumerate(self.signals):
index = seconds * signal.sampling_frequency
if index != int(index):
raise ValueError(
f"{seconds}s is not a sample time of signal {i} ({signal.label}) with fs={signal.sampling_frequency}Hz"
)
def _shift_startdatetime(self, seconds: float) -> None:
timedelta = datetime.timedelta(seconds=seconds)
try:
startdate = self.startdate
startdate_anonymized = False
except AnonymizedDateError:
startdate = datetime.date.fromtimestamp(0)
startdate_anonymized = True
startdatetime = datetime.datetime.combine(startdate, self.starttime)
startdatetime += timedelta
if not startdate_anonymized:
self.startdate = startdatetime.date()
self.starttime = startdatetime.time()
def copy(self) -> Edf:
"""
Create a deep copy of the Edf.
Returns
-------
Edf
The copied Edf object.
"""
return copy.deepcopy(self)
def _slice_annotations_signal(
self,
signal: EdfSignal,
*,
start: float,
stop: float,
keep_all_annotations: bool,
) -> EdfSignal:
is_timekeeping_signal = signal == self._timekeeping_signal
annotations: list[EdfAnnotation] = []
for data_record in signal._digital.reshape(
(-1, signal.samples_per_data_record)
):
annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
if is_timekeeping_signal:
annotations.extend(annot_dr.annotations[1:])
else:
annotations.extend(annot_dr.annotations)
annotations = [
EdfAnnotation(round(a.onset - start, 12), a.duration, a.text)
for a in annotations
if keep_all_annotations or start <= a.onset < stop
]
return _create_annotations_signal(
annotations,
num_data_records=self.num_data_records,
data_record_duration=self.data_record_duration,
with_timestamps=is_timekeeping_signal,
subsecond_offset=self._subsecond_offset + start - int(start),
)
def _create_annotations_signal(
annotations: Iterable[EdfAnnotation],
*,
num_data_records: int,
data_record_duration: float,
with_timestamps: bool = True,
subsecond_offset: float = 0,
) -> EdfSignal:
data_record_starts = np.arange(num_data_records) * data_record_duration
annotations = sorted(annotations)
data_records = []
for i, start in enumerate(data_record_starts):
end = start + data_record_duration
tals: list[_EdfTAL] = []
if with_timestamps:
tals.append(_EdfTAL(np.round(start + subsecond_offset, 12), None, [""]))
for ann in annotations:
if (
(i == 0 and ann.onset < 0)
or (i == (num_data_records - 1) and end <= ann.onset)
or (start <= ann.onset < end)
):
tals.append(
_EdfTAL(
np.round(ann.onset + subsecond_offset, 12),
ann.duration,
[ann.text],
)
)
data_records.append(_EdfAnnotationsDataRecord(tals).to_bytes())
maxlen = max(len(data_record) for data_record in data_records)
if maxlen % 2:
maxlen += 1
raw = b"".join(dr.ljust(maxlen, b"\x00") for dr in data_records)
divisor = data_record_duration if data_record_duration else 1
signal = EdfSignal(
np.arange(1.0), # placeholder signal, as argument `data` is non-optional
sampling_frequency=maxlen // 2 / divisor,
physical_range=(-32768, 32767),
)
signal._label = "EDF Annotations"
signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]
maxlen // 2
)
signal._digital = np.frombuffer(raw, dtype=np.int16).copy()
return signal
@dataclass
class _EdfTAL:
onset: float
duration: float | None
texts: list[str]
def to_bytes(self) -> bytes:
timing = encode_annotation_onset(self.onset)
if self.duration is not None: | timing += f"\x15{encode_annotation_duration(self.duration)}" | 11 | 2023-11-09 09:53:27+00:00 | 8k |
microsoft/folx | folx/hessian.py | [
{
"identifier": "JAC_DIM",
"path": "folx/api.py",
"snippet": "T = TypeVar(\"T\", bound=PyTree[Array])\nR = TypeVar(\"R\", bound=PyTree[Array])\nJAC_DIM = 0 # should be either 0 or -1. TODO: switching is not support.\n GENERAL = 0\n LINEAR_IN_FIRST = 1\n LINEAR_IN_ONE = 2 | LINEAR_IN_FIRST\n LINEAR = 4 | LINEAR_IN_ONE\n REDUCTION = 8\n MULTIPLICATION = 16 | LINEAR_IN_ONE\n DOT_PRODUCT = 32 | REDUCTION | MULTIPLICATION\n INDEXING = 64 | LINEAR\n SCATTER = 128\n JOIN_JVP = 256\nclass FwdJacobian(NamedTuple):\nclass FwdLaplArray(NamedTuple):\nclass FwdLaplArgs(NamedTuple):\nclass MergeFn(Protocol):\nclass ForwardLaplacianFns(NamedTuple):\nclass JvpFn(Protocol):\nclass CustomTraceJacHessianJac(Protocol):\nclass ForwardLaplacian(Protocol):\nclass FunctionFlags(IntFlag):\n def weak(self) -> bool:\n def unique_idx(self):\n def materialize_for_idx(self, idx, max_idx: int | None = None):\n def aggregate(x, indices):\n def get_index_mask(self, outputs):\n def get_indices(mask, out_mask):\n def data_shape(self):\n def construct_jac_for(self, idx):\n def dense_array(self) -> Array:\n def max_n(self) -> int:\n def as_dense(self):\n def dense_or_sparse(self) -> Array:\n def sparse(self) -> Array:\n def mask(self) -> np.ndarray:\n def ndim(self) -> int:\n def from_dense(cls, array):\n def __add__(self, other):\n def astype(self, dtype):\n def shape(self):\n def ndim(self):\n def dense_jacobian(self):\n def is_jacobian_weak(self):\n def sparse_jacobian(self):\n def jacobian_mask(self):\n def dense(self):\n def astype(self, dtype):\ndef IS_LPL_ARR(x):\ndef IS_LEAF(x):\n def x(self) -> Arrays:\n def jacobian(self) -> tuple[FwdJacobian, ...]:\n def dense_jacobian(self) -> Arrays:\n def sparse_jacobian(self) -> Arrays:\n def jacobian_mask(self):\n def all_jacobian_weak(self) -> bool:\n def any_jacobian_weak(self) -> bool:\n def dense(self):\n def laplacian(self) -> Arrays:\n def one_hot_sparse_jacobian(self):\n def __len__(self) -> int:\n def __call__(self, args: Arrays, extra: ExtraArgs) -> Arrays:\n def __call__(self, primals: Arrays, tangents: Arrays) -> tuple[Array, Array]:\n def __call__(self, args: FwdLaplArgs, extra_args: ExtraArgs, merge: MergeFn, materialize_idx: Array) -> PyTree[Array]:\n def __call__(self, *args: ArrayOrFwdLaplArray, sparsity_threshold: int , **kwargs) -> PyTree[ArrayOrFwdLaplArray]:"
},
{
"identifier": "add_vmap_jacobian_dim",
"path": "folx/utils.py",
"snippet": "def add_vmap_jacobian_dim(args: FwdLaplArgs, in_axes: FwdLaplArgs):\n \"\"\"\n Adds a new dimension to the given args.\n The new dimension is added to the jacobian of each array.\n \"\"\"\n return FwdLaplArgs(\n tuple(\n FwdLaplArray(\n x=ax, # type: ignore\n jacobian=(*(x + (x >= JAC_DIM) for x in ax), JAC_DIM), # type: ignore\n laplacian=ax, # type: ignore\n )\n for a, ax in zip(args.arrays, in_axes.arrays)\n )\n )"
},
{
"identifier": "array_wise_flat_wrap",
"path": "folx/utils.py",
"snippet": "def array_wise_flat_wrap(fn: ForwardFn, *x: Array):\n \"\"\"\n Wraps the given function such that it takes a flat arrays\n as input and returns a flat array as output.\n \"\"\"\n unravels = [jfu.ravel_pytree(x_)[1] for x_ in x]\n\n def new_fn(*flat_x: Array) -> Array:\n x = [unravel(flat_x_) for unravel, flat_x_ in zip(unravels, flat_x)]\n return jfu.ravel_pytree(fn(*x))[0] # type: ignore\n\n return new_fn"
},
{
"identifier": "flat_wrap",
"path": "folx/utils.py",
"snippet": "def flat_wrap(fn: ForwardFn, *x: Array):\n \"\"\"\n Wraps the given function such that it takes a flat array\n as input and returns a flat array as output. All inputs are expected\n to be concatenated.\n \"\"\"\n _, x_unravel = jfu.ravel_pytree(x)\n\n def new_fn(flat_x: Array) -> Array:\n x = x_unravel(flat_x)\n return jfu.ravel_pytree(fn(*x))[0] # type: ignore\n\n return new_fn"
},
{
"identifier": "get_reduced_jacobians",
"path": "folx/utils.py",
"snippet": "def get_reduced_jacobians(*jacs: FwdJacobian, idx: Array | np.ndarray | None):\n \"\"\"\n Takes a sequence of jacobians and returns a sequence of\n jacobians where only the shared indices are kept.\n \"\"\"\n if idx is None:\n data = [j.dense_array for j in jacs]\n data = extend_jacobians(*data, axis=JAC_DIM)\n data = [x.reshape(x.shape[0], -1) for x in data]\n else:\n data = [j.construct_jac_for(idx) for j in jacs]\n data = [x.reshape(len(idx), -1) for x in data]\n return data"
},
{
"identifier": "jac_jacT",
"path": "folx/utils.py",
"snippet": "def jac_jacT(first: FwdJacobian, other: FwdJacobian, idx: Array | np.ndarray | None):\n \"\"\"\n Computes outer product of the given jacobians.\n \"\"\"\n x, y = get_reduced_jacobians(first, other, idx=idx)\n return x @ y.T"
},
{
"identifier": "trace_jac_jacT",
"path": "folx/utils.py",
"snippet": "def trace_jac_jacT(first: FwdJacobian, other: FwdJacobian, idx: Array | np.ndarray | None):\n \"\"\"\n Computes the trace of the product of the given jacobians.\n \"\"\"\n x, y = get_reduced_jacobians(first, other, idx=idx)\n return trace_of_product(x, y)"
},
{
"identifier": "trace_of_product",
"path": "folx/utils.py",
"snippet": "def trace_of_product(mat1: Array, mat2: Array):\n \"\"\"\n Computes the trace of the product of the given matrices.\n \"\"\"\n # ij,ij->... is a faster way to compute the trace than tr(mat1@mat2)\n # since one can rewrite the trace as sum_ij mat1_ij * mat2_ij\n return jnp.einsum(\"...ij,...ij->...\", mat1, mat2)"
},
{
"identifier": "vmap_sequences_and_squeeze",
"path": "folx/utils.py",
"snippet": "def vmap_sequences_and_squeeze(arrs: PyTree[Array], in_axes: Axes = None):\n \"\"\"\n Returns two things:\n - a sequence of pytrees of the same structure as arrs but with\n vmap in_axes as leaves such that all arrays can be broadcasted\n to the same shape. in_axes are kept.\n - the source arrays with all axes that are 1 in all arrays removed.\n \"\"\"\n seqs = vmap_sequences(arrs, in_axes)\n sque_args = arg_squeeze_dims(arrs, in_axes)\n return seqs, sque_args"
}
] | import functools
import logging
import jax
import jax.flatten_util as jfu
import jax.numpy as jnp
import jax.tree_util as jtu
import jaxlib.xla_extension
import numpy as np
from typing import Callable, Sequence
from jax import core
from .api import (
JAC_DIM,
Array,
Axes,
CustomTraceJacHessianJac,
ExtraArgs,
ForwardFn,
FunctionFlags,
FwdJacobian,
FwdLaplArgs,
FwdLaplArray,
MergeFn,
PyTree,
)
from .utils import (
add_vmap_jacobian_dim,
array_wise_flat_wrap,
flat_wrap,
get_reduced_jacobians,
jac_jacT,
trace_jac_jacT,
trace_of_product,
vmap_sequences_and_squeeze,
) | 4,414 | return arrs
if arrs[0].ndim >= 1:
return [remove_fill(x, find_unique=find_unique) for x in arrs]
if find_unique:
arrs = np.unique(arrs)
return arrs[arrs >= 0] # type: ignore
def merge_and_populate(arrs: Sequence[np.ndarray], operation: Callable[[np.ndarray, np.ndarray], np.ndarray]):
"""
    The arrays are assumed to share the same nested structure. Corresponding arrays are combined
    with the given operation (e.g., ``np.intersect1d`` or ``np.union1d``). We then find the
    maximum result size and fill all arrays to that size with -1.
Args:
- arrs: list of arrays
Returns:
- arrs: np.ndarray where only intersections are kept and all arrays are filled to the same size.
"""
result = jtu.tree_map(
lambda *x: functools.reduce(operation, tuple(x[1:]), x[0]),
*arrs,
is_leaf=lambda x: isinstance(x, np.ndarray)
)
sizes = jtu.tree_map(lambda x: x.size, result, is_leaf=lambda x: isinstance(x, np.ndarray))
max_size = np.max(jtu.tree_leaves(sizes))
result = jtu.tree_map(
lambda x: np.concatenate([x, np.full(max_size - x.size, -1, dtype=x.dtype)]),
result,
is_leaf=lambda x: isinstance(x, np.ndarray),
)
return np.asarray(result, dtype=int)
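# Illustrative sketch (editor's addition): how merge_and_populate behaves on a small, made-up
# nested input. With np.intersect1d, only indices shared between corresponding leaves survive;
# shorter results are padded with -1 so everything stacks into one rectangular integer array.
#
# >>> merge_and_populate(
# ...     [[np.array([0, 1, 2]), np.array([0, 1])],
# ...      [np.array([2, 3]), np.array([0, 1])]],
# ...     np.intersect1d,
# ... )
# array([[ 2, -1],
#        [ 0,  1]])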
def find_materialization_idx(lapl_args: FwdLaplArgs, in_axes, flags: FunctionFlags, threshold: int):
if not lapl_args.any_jacobian_weak:
return None
    # TODO: Rewrite this!! This is quite messy and inefficient.
    # It assumes that we're only interested in the last dimension.
with core.new_main(core.EvalTrace, dynamic=True):
vmap_seq, (inp,) = vmap_sequences_and_squeeze(
([j.mask for j in lapl_args.jacobian],),
([j for j in add_vmap_jacobian_dim(lapl_args, FwdLaplArgs(in_axes)).jacobian],),
)
max_size = np.max([np.sum(j.unique_idx >= 0, dtype=int) for j in lapl_args.jacobian])
# This can be quite memory intensive, so we try to do it on the GPU and
# if that fails we just use the CPU. On the CPU this takes quite some time.
# TODO: work on a more memory efficient implementation!
unique_fn = functools.partial(jnp.unique, size=max_size + 1, fill_value=-1)
def idx_fn(x):
return jtu.tree_map(unique_fn, x)
for s in vmap_seq[::-1]:
idx_fn = jax.vmap(idx_fn, in_axes=s)
try:
# This path is more memory intensive by using the GPU to find uniques but
# potentially fails if the arrays are too large.
            # +1 because we need to accommodate the -1.
arrs = np.asarray(idx_fn(inp), dtype=int)
except jaxlib.xla_extension.XlaRuntimeError:
logging.info(
"Failed to find unique elements on GPU, falling back to CPU. This will be slow."
)
with jax.default_device(jax.devices("cpu")[0]):
arrs = np.asarray(idx_fn(inp), dtype=int)
filtered_arrs = remove_fill(arrs, False)
if FunctionFlags.LINEAR_IN_ONE in flags:
# For off diagonal Hessians we only need to look at the intersection between
# all arrays rather than their union.
idx = merge_and_populate(filtered_arrs, np.intersect1d) # type: ignore
else:
idx = merge_and_populate(filtered_arrs, np.union1d) # type: ignore
idx = np.moveaxis(idx, -1, JAC_DIM)
if idx.shape[JAC_DIM] >= max_size or idx.shape[JAC_DIM] > threshold:
idx = None
return idx
def remove_zero_entries(lapl_args: FwdLaplArgs, materialize_idx: np.ndarray | None):
if materialize_idx is None:
return lapl_args, None, None
mask = (materialize_idx != -1).any(0)
if mask.sum() > 0.5 * mask.size:
# this is a heuristic to avoid having unnecessary indexing overhead for
# insufficiently sparse masks.
return lapl_args, materialize_idx, None
indices = np.where(mask)
new_mat_idx = materialize_idx[(slice(None), *indices)]
new_arrs = []
for arg in lapl_args.arrays:
brdcast_dims = np.where(np.array(arg.x.shape) == 1)[0]
idx = tuple(
0 if i in brdcast_dims else x
for i, x in enumerate(indices)
)
new_arrs.append(FwdLaplArray(
x=arg.x[idx],
jacobian=FwdJacobian(
data=arg.jacobian.data[(slice(None), *idx)],
x0_idx=arg.jacobian.x0_idx[(slice(None), *idx)], # type: ignore
),
laplacian=arg.laplacian[idx],
))
new_args = FwdLaplArgs(tuple(new_arrs))
return new_args, new_mat_idx, mask
def vmapped_jac_hessian_jac(
fwd: ForwardFn,
flags: FunctionFlags,
custom_jac_hessian_jac: CustomTraceJacHessianJac | None,
extra_args: ExtraArgs,
in_axes: Axes,
extra_in_axes: Axes,
merge: MergeFn,
sparsity_threshold: int,
lapl_args: FwdLaplArgs,
|
def general_jac_hessian_jac(fn: ForwardFn, args: FwdLaplArgs, materialize_idx: Array | None):
# It's conceptually easier to work with the flattened version of the
# Hessian, since we can then use einsum to compute the trace.
flat_fn = flat_wrap(fn, *args.x)
flat_x = jfu.ravel_pytree(args.x)[0]
out, unravel = jfu.ravel_pytree(fn(*args.x))
# We have to decide on an order in which we execute tr(HJJ^T).
# H will be of shape NxDxD, J is DxK where N could potentially be D.
# We will do the following:
# if K >= D, we compute
# JJ^T first and then the trace.
    # if D > K, we compute HJ first and then the trace.
# We should also flatten our gradient tensor to a 2D matrix where the first dimension
# is the x0 dim and the second dim is the input dim.
grads_2d = get_reduced_jacobians(*args.jacobian, idx=materialize_idx)
grad_2d = jnp.concatenate([x.T for x in grads_2d], axis=0)
D, K = grad_2d.shape
if K > D:
# jax.hessian uses Fwd on Reverse AD
flat_hessian = jax.hessian(flat_fn)(flat_x)
flat_out = trace_of_product(flat_hessian, grad_2d @ grad_2d.T)
elif D > K:
        # Directly compute the trace of tr(HJJ^T)=tr(J^THJ)
@functools.partial(jax.vmap, in_axes=-1, out_axes=-1)
def vhvp(tangent):
def vjp(x):
@functools.partial(jax.vmap, in_axes=(None, -1), out_axes=-1)
def jvp(x, tangent):
return jax.jvp(flat_fn, (x,), (tangent,))[1]
return jvp(x, grad_2d)
return jax.jvp(vjp, (flat_x,), (tangent,))[1]
flat_out = jnp.trace(vhvp(grad_2d), axis1=-2, axis2=-1)
else:
# Implementation where we compute HJ and then the trace via
# the sum of hadamard product
@functools.partial(jax.vmap, in_axes=-1, out_axes=-1)
def hvp(tangent):
def jacobian(x):
return jax.jacrev(flat_fn)(x)
return jax.jvp(jacobian, (flat_x,), (tangent,))[1]
HJ = hvp(grad_2d) # N x D x K
flat_out = trace_of_product(HJ, grad_2d) # N x D x K and D x K
return unravel(flat_out)
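# --- Illustrative sketch (not part of the original module) ---
# A small numpy check of the identity tr(H @ J @ J^T) == tr(J^T @ H @ J) that motivates the
# K-vs-D branching above: both contraction orders give the same trace, so we can pick the one
# with the cheaper intermediate. The helper name and sizes are invented for illustration;
# numpy is assumed to be imported as np, as in the rest of this module.
def _trace_ordering_sketch(D: int = 4, K: int = 3, seed: int = 0):
    rng = np.random.default_rng(seed)
    H = rng.normal(size=(D, D))
    H = 0.5 * (H + H.T)            # Hessians are symmetric
    J = rng.normal(size=(D, K))
    lhs = np.trace(H @ (J @ J.T))  # form J J^T first (cheap when K >= D)
    rhs = np.trace(J.T @ H @ J)    # form H J first (cheap when D > K)
    assert np.allclose(lhs, rhs)
    return lhs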
def off_diag_jac_hessian_jac(fn: ForwardFn, args: FwdLaplArgs, materialize_idx: Array | None):
    # If we know that a function is linear in one argument, its Hessian must be off-diagonal,
    # so we can save some computation by only computing the off-diagonal block of the Hessian.
assert len(args) == 2, "Off diag hessian only supports 2 args at the moment."
def flat_arr(x: FwdLaplArray) -> Array:
return jfu.ravel_pytree(x.x)[0]
flat_fn = array_wise_flat_wrap(fn, *args.x)
def jac_lhs(lhs, rhs):
return jax.jacobian(flat_fn, argnums=0)(lhs, rhs)
hessian = jax.jacobian(jac_lhs, argnums=1)(flat_arr(args.arrays[0]), flat_arr(args.arrays[1]))
flat_out = 2 * trace_of_product(
hessian, jac_jacT(args.arrays[0].jacobian, args.arrays[1].jacobian, materialize_idx)
)
unravel = jfu.ravel_pytree(fn(*args.x))[1]
return unravel(flat_out)
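# --- Illustrative sketch (not part of the original module) ---
# If f(a, b) is linear in a, then d^2f/da^2 vanishes, so only the mixed (off-diagonal) block of
# the Hessian can be non-zero -- the property exploited above. The toy function and helper name
# below are invented for illustration; jax and jnp are assumed imported as in this module.
def _off_diag_hessian_sketch():
    def f(a, b):
        return jnp.sum(a * jnp.tanh(b))  # linear in a, nonlinear in b
    a = jnp.ones(3)
    b = jnp.arange(3.0)
    d2f_daa = jax.hessian(f, argnums=0)(a, b)                        # diagonal block w.r.t. a
    d2f_dab = jax.jacobian(jax.grad(f, argnums=0), argnums=1)(a, b)  # mixed block
    assert jnp.allclose(d2f_daa, 0.0)
    return d2f_dab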
def mul_jac_hessian_jac(fn: ForwardFn, args: FwdLaplArgs, shared_idx: Array | None):
# For a dot product we know that the hessian looks like this:
# [0, I]
# [I, 0]
# where I is the identity matrix of the same shape as the input.
assert len(args) == 2, "Dot product only supports two args."
flat_out = (
2 * trace_jac_jacT(args.arrays[0].jacobian, args.arrays[1].jacobian, shared_idx)[None]
)
unravel = jfu.ravel_pytree(fn(*args.x))[1]
return unravel(flat_out)
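# --- Illustrative sketch (not part of the original module) ---
# For f(a, b) = a @ b the Hessian over the concatenated input (a, b) is the block matrix
# [[0, I], [I, 0]], so tr(H J J^T) collapses to 2 * tr(J_a @ J_b^T) -- the shortcut used above.
# The helper name and sizes are invented; numpy is assumed imported as np.
def _dot_product_hessian_sketch(D: int = 3, K: int = 2, seed: int = 0):
    rng = np.random.default_rng(seed)
    J_a = rng.normal(size=(D, K))   # Jacobian of a w.r.t. the x0 inputs
    J_b = rng.normal(size=(D, K))   # Jacobian of b w.r.t. the x0 inputs
    H = np.block([[np.zeros((D, D)), np.eye(D)], [np.eye(D), np.zeros((D, D))]])
    J = np.concatenate([J_a, J_b], axis=0)  # (2D, K)
    full = np.trace(H @ J @ J.T)
    shortcut = 2.0 * np.trace(J_a @ J_b.T)
    assert np.allclose(full, shortcut)
    return shortcut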
def remove_fill(arrs: np.ndarray, find_unique: bool = False):
"""
    Remove the fill value from an array. Since the tensors might not be regularly shaped
    afterwards, all leading dimensions are replaced by nested lists.
Args:
- arrs: array to remove fill value from
Returns:
- arrs: nested lists of arrays without fill value
"""
if arrs.size == 0:
return arrs
if arrs[0].ndim >= 1:
return [remove_fill(x, find_unique=find_unique) for x in arrs]
if find_unique:
arrs = np.unique(arrs)
return arrs[arrs >= 0] # type: ignore
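# --- Illustrative usage sketch (not part of the original module) ---
# remove_fill strips the -1 padding row by row and returns nested lists once the per-row
# lengths no longer match; the toy input below is invented for illustration.
def _remove_fill_sketch():
    padded = np.array([[0, 2, 5, -1], [1, 3, -1, -1]])
    ragged = remove_fill(padded)
    # ragged == [array([0, 2, 5]), array([1, 3])]
    return ragged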
def merge_and_populate(arrs: Sequence[np.ndarray], operation: Callable[[np.ndarray, np.ndarray], np.ndarray]):
"""
    The arrays are assumed to be of the same shape. We combine corresponding arrays with the
    given operation (e.g., intersection or union), find the maximum result size, and fill all
    results with -1 up to that size.
Args:
- arrs: list of arrays
Returns:
- arrs: np.ndarray where only intersections are kept and all arrays are filled to the same size.
"""
result = jtu.tree_map(
lambda *x: functools.reduce(operation, tuple(x[1:]), x[0]),
*arrs,
is_leaf=lambda x: isinstance(x, np.ndarray)
)
sizes = jtu.tree_map(lambda x: x.size, result, is_leaf=lambda x: isinstance(x, np.ndarray))
max_size = np.max(jtu.tree_leaves(sizes))
result = jtu.tree_map(
lambda x: np.concatenate([x, np.full(max_size - x.size, -1, dtype=x.dtype)]),
result,
is_leaf=lambda x: isinstance(x, np.ndarray),
)
return np.asarray(result, dtype=int)
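# --- Illustrative usage sketch (not part of the original module) ---
# merge_and_populate reduces corresponding leaves with the given set operation and pads every
# result with -1 up to the largest result size; the toy index trees below are invented.
def _merge_and_populate_sketch():
    tree_a = [np.array([0, 1, 2, 3]), np.array([0, 1])]
    tree_b = [np.array([2, 3, 4]), np.array([1, 5])]
    merged = merge_and_populate([tree_a, tree_b], np.intersect1d)
    # leaf-wise intersections are [2, 3] and [1]; the shorter one is padded with -1,
    # so merged == np.array([[2, 3], [1, -1]])
    return merged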
def find_materialization_idx(lapl_args: FwdLaplArgs, in_axes, flags: FunctionFlags, threshold: int):
if not lapl_args.any_jacobian_weak:
return None
    # TODO: Rewrite this!! This is quite messy and inefficient.
# it assumes that we're only interested in the last dimension.
with core.new_main(core.EvalTrace, dynamic=True):
vmap_seq, (inp,) = vmap_sequences_and_squeeze(
([j.mask for j in lapl_args.jacobian],),
([j for j in add_vmap_jacobian_dim(lapl_args, FwdLaplArgs(in_axes)).jacobian],),
)
max_size = np.max([np.sum(j.unique_idx >= 0, dtype=int) for j in lapl_args.jacobian])
# This can be quite memory intensive, so we try to do it on the GPU and
# if that fails we just use the CPU. On the CPU this takes quite some time.
# TODO: work on a more memory efficient implementation!
unique_fn = functools.partial(jnp.unique, size=max_size + 1, fill_value=-1)
def idx_fn(x):
return jtu.tree_map(unique_fn, x)
for s in vmap_seq[::-1]:
idx_fn = jax.vmap(idx_fn, in_axes=s)
try:
# This path is more memory intensive by using the GPU to find uniques but
# potentially fails if the arrays are too large.
            # +1 because we need to accommodate the -1.
arrs = np.asarray(idx_fn(inp), dtype=int)
except jaxlib.xla_extension.XlaRuntimeError:
logging.info(
"Failed to find unique elements on GPU, falling back to CPU. This will be slow."
)
with jax.default_device(jax.devices("cpu")[0]):
arrs = np.asarray(idx_fn(inp), dtype=int)
filtered_arrs = remove_fill(arrs, False)
if FunctionFlags.LINEAR_IN_ONE in flags:
# For off diagonal Hessians we only need to look at the intersection between
# all arrays rather than their union.
idx = merge_and_populate(filtered_arrs, np.intersect1d) # type: ignore
else:
idx = merge_and_populate(filtered_arrs, np.union1d) # type: ignore
idx = np.moveaxis(idx, -1, JAC_DIM)
if idx.shape[JAC_DIM] >= max_size or idx.shape[JAC_DIM] > threshold:
idx = None
return idx
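# --- Illustrative sketch (not part of the original module) ---
# jnp.unique with a static `size` and `fill_value` keeps the output shape fixed, which is what
# lets find_materialization_idx wrap it in jax.vmap above. Toy values, invented helper name.
def _static_unique_sketch():
    x = jnp.array([[3, 3, 1, -1], [2, 2, 2, 2]])
    uniq = jax.vmap(functools.partial(jnp.unique, size=3, fill_value=-1))(x)
    # per row: sorted uniques padded with the fill value, e.g. [[-1, 1, 3], [2, -1, -1]]
    return uniq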
def remove_zero_entries(lapl_args: FwdLaplArgs, materialize_idx: np.ndarray | None):
if materialize_idx is None:
return lapl_args, None, None
mask = (materialize_idx != -1).any(0)
if mask.sum() > 0.5 * mask.size:
# this is a heuristic to avoid having unnecessary indexing overhead for
# insufficiently sparse masks.
return lapl_args, materialize_idx, None
indices = np.where(mask)
new_mat_idx = materialize_idx[(slice(None), *indices)]
new_arrs = []
for arg in lapl_args.arrays:
brdcast_dims = np.where(np.array(arg.x.shape) == 1)[0]
idx = tuple(
0 if i in brdcast_dims else x
for i, x in enumerate(indices)
)
new_arrs.append(FwdLaplArray(
x=arg.x[idx],
jacobian=FwdJacobian(
data=arg.jacobian.data[(slice(None), *idx)],
x0_idx=arg.jacobian.x0_idx[(slice(None), *idx)], # type: ignore
),
laplacian=arg.laplacian[idx],
))
new_args = FwdLaplArgs(tuple(new_arrs))
return new_args, new_mat_idx, mask
def vmapped_jac_hessian_jac(
fwd: ForwardFn,
flags: FunctionFlags,
custom_jac_hessian_jac: CustomTraceJacHessianJac | None,
extra_args: ExtraArgs,
in_axes: Axes,
extra_in_axes: Axes,
merge: MergeFn,
sparsity_threshold: int,
lapl_args: FwdLaplArgs, | ) -> PyTree[Array]: | 0 | 2023-11-07 16:32:46+00:00 | 8k |
shuttworth/NICE-SLAM-Easyread | run.py | [
{
"identifier": "config",
"path": "src/config.py",
"snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg, nice=True):"
},
{
"identifier": "NICE_SLAM",
"path": "src/NICE_SLAM.py",
"snippet": "class NICE_SLAM():\n \"\"\"\n NICE_SLAM main class.\n Mainly allocate shared resources, and dispatch mapping and tracking process.\n \"\"\"\n\n def __init__(self, cfg, args):\n # 初始化配置和参数\n self.cfg = cfg\n self.args = args\n self.nice = args.nice\n\n # 从配置中读取各种设置\n self.coarse = cfg['coarse']\n self.occupancy = cfg['occupancy']\n self.low_gpu_mem = cfg['low_gpu_mem']\n self.verbose = cfg['verbose']\n self.dataset = cfg['dataset']\n self.coarse_bound_enlarge = cfg['model']['coarse_bound_enlarge']\n\n # 设置输出目录\n if args.output is None:\n self.output = cfg['data']['output']\n else:\n self.output = args.output\n self.ckptsdir = os.path.join(self.output, 'ckpts')\n os.makedirs(self.output, exist_ok=True)\n os.makedirs(self.ckptsdir, exist_ok=True)\n os.makedirs(f'{self.output}/mesh', exist_ok=True)\n\n # 读取相机配置\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][\n 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy']\n self.update_cam()\n\n # 初始化模型\n model = config.get_model(cfg, nice=self.nice)\n self.shared_decoders = model\n\n # 加载其他配置\n self.scale = cfg['scale']\n\n self.load_bound(cfg)\n if self.nice:\n self.load_pretrain(cfg)\n self.grid_init(cfg)\n else:\n self.shared_c = {}\n\n # need to use spawn\n # 设置多进程启动方法\n try:\n mp.set_start_method('spawn', force=True)\n except RuntimeError:\n pass\n\n # 初始化帧读取器\n self.frame_reader = get_dataset(cfg, args, self.scale)\n self.n_img = len(self.frame_reader)\n\n # 初始化估计的相机位姿列表\n self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4))\n self.estimate_c2w_list.share_memory_()\n\n # 初始化真实的相机位姿列表\n self.gt_c2w_list = torch.zeros((self.n_img, 4, 4))\n self.gt_c2w_list.share_memory_()\n\n # 初始化其他共享内存变量\n self.idx = torch.zeros((1)).int()\n self.idx.share_memory_()\n self.mapping_first_frame = torch.zeros((1)).int()\n self.mapping_first_frame.share_memory_()\n # the id of the newest frame Mapper is processing\n self.mapping_idx = torch.zeros((1)).int()\n self.mapping_idx.share_memory_()\n self.mapping_cnt = torch.zeros((1)).int() # counter for mapping\n self.mapping_cnt.share_memory_()\n\n # 将共享变量移至指定设备并共享内存\n for key, val in self.shared_c.items():\n val = val.to(self.cfg['mapping']['device'])\n val.share_memory_()\n self.shared_c[key] = val\n \n # 初始化渲染器、网格生成器、日志记录器\n self.shared_decoders = self.shared_decoders.to(\n self.cfg['mapping']['device'])\n self.shared_decoders.share_memory()\n self.renderer = Renderer(cfg, args, self)\n self.mesher = Mesher(cfg, args, self)\n self.logger = Logger(cfg, args, self)\n self.mapper = Mapper(cfg, args, self, coarse_mapper=False)\n\n # 初始化映射器和追踪器\n if self.coarse:\n self.coarse_mapper = Mapper(cfg, args, self, coarse_mapper=True)\n self.tracker = Tracker(cfg, args, self)\n\n # 打印输出描述\n self.print_output_desc()\n\n def print_output_desc(self):\n # 打印输出信息,在上方的__init__里调用\n print(f\"INFO: The output folder is {self.output}\")\n if 'Demo' in self.output:\n print(\n f\"INFO: The GT, generated and residual depth/color images can be found under \" +\n f\"{self.output}/vis/\")\n else:\n print(\n f\"INFO: The GT, generated and residual depth/color images can be found under \" +\n f\"{self.output}/tracking_vis/ and {self.output}/mapping_vis/\")\n print(f\"INFO: The mesh can be found under {self.output}/mesh/\")\n print(f\"INFO: The checkpoint can be found under {self.output}/ckpt/\")\n\n # 根据预处理配置更新相机的内参,这可能包括调整图像大小或裁剪边缘,在__init__里调用\n def update_cam(self):\n \"\"\"\n Update the camera intrinsics according to pre-processing config, \n such as resize or edge 
crop.\n \"\"\"\n # resize the input images to crop_size (variable name used in lietorch) \n # 检查配置中是否有 crop_size 参数。如果有,它将调整相机的焦距和主点坐标以适应新的图像尺寸\n # sx 和 sy 是宽度和高度的缩放比例,分别用于调整焦距(fx, fy)和主点坐标(cx, cy)。最后,更新图像的宽度(W)和高度(H)为新的裁剪尺寸\n if 'crop_size' in self.cfg['cam']:\n crop_size = self.cfg['cam']['crop_size']\n sx = crop_size[1] / self.W\n sy = crop_size[0] / self.H\n self.fx = sx*self.fx\n self.fy = sy*self.fy\n self.cx = sx*self.cx\n self.cy = sy*self.cy\n self.W = crop_size[1]\n self.H = crop_size[0]\n\n # croping will change H, W, cx, cy, so need to change here\n # 检查配置中是否有 crop_edge 参数,用于裁剪图像边缘,如果 crop_edge 大于0(nice_slam.yaml里的crop_edge值是0),它将从图像的宽度和高度中减去两倍的 crop_edge 值,并相应地调整主点坐标\n if self.cfg['cam']['crop_edge'] > 0:\n self.H -= self.cfg['cam']['crop_edge']*2\n self.W -= self.cfg['cam']['crop_edge']*2\n self.cx -= self.cfg['cam']['crop_edge']\n self.cy -= self.cfg['cam']['crop_edge']\n\n # 加载和设置场景的边界参数,在__init__里调用\n def load_bound(self, cfg):\n \"\"\"\n Pass the scene bound parameters to different decoders and self.\n\n Args:\n cfg (dict): parsed config dict.\n \"\"\"\n # scale the bound if there is a global scaling factor\n # 从配置中读取边界参数,并将其转换为一个PyTorch张量。边界参数被乘以一个全局缩放因子 self.scale(nice_slam.yaml里的scale值是1),用于调整场景的大小\n self.bound = torch.from_numpy(\n np.array(cfg['mapping']['bound'])*self.scale)\n bound_divisible = cfg['grid_len']['bound_divisible']\n # enlarge the bound a bit to allow it divisible by bound_divisible\n # 调整边界的上限,使其可以被 bound_divisible 整除\n self.bound[:, 1] = (((self.bound[:, 1]-self.bound[:, 0]) /\n bound_divisible).int()+1)*bound_divisible+self.bound[:, 0]\n # 如果执行的是nice-slam的算法\n if self.nice:\n self.shared_decoders.bound = self.bound\n self.shared_decoders.middle_decoder.bound = self.bound\n self.shared_decoders.fine_decoder.bound = self.bound\n self.shared_decoders.color_decoder.bound = self.bound\n # 如果粗层场景表达是coarse,给乘以一个额外的扩大因子 self.coarse_bound_enlarge,粗粒度解码器需要处理更大范围的场景数据\n if self.coarse:\n self.shared_decoders.coarse_decoder.bound = self.bound*self.coarse_bound_enlarge\n\n # 加载预先训练的ConvOnet参数,在__init__里调用\n # ConvONet论文:https://arxiv.org/pdf/2003.04618.pdf\n def load_pretrain(self, cfg):\n \"\"\"\n Load parameters of pretrained ConvOnet checkpoints to the decoders.\n\n Args:\n cfg (dict): parsed config dict\n \"\"\"\n\n if self.coarse:\n # ckpt加载coarse权重(从yaml的pretrained_decoders里)\n ckpt = torch.load(cfg['pretrained_decoders']['coarse'],\n map_location=cfg['mapping']['device'])\n # 初始化一个空字典,用于存储调整后的权重\n coarse_dict = {}\n # 遍历模型权重,只处理解码器的权重,排除编码器的权重\n for key, val in ckpt['model'].items():\n if ('decoder' in key) and ('encoder' not in key):\n key = key[8:]\n coarse_dict[key] = val\n # 加载权重到解码器\n self.shared_decoders.coarse_decoder.load_state_dict(coarse_dict)\n\n # ckpt加载middle_fine权重(从yaml的pretrained_decoders里)\n ckpt = torch.load(cfg['pretrained_decoders']['middle_fine'],\n map_location=cfg['mapping']['device'])\n middle_dict = {}\n fine_dict = {}\n for key, val in ckpt['model'].items():\n if ('decoder' in key) and ('encoder' not in key):\n if 'coarse' in key:\n key = key[8+7:]\n middle_dict[key] = val\n elif 'fine' in key:\n key = key[8+5:]\n fine_dict[key] = val\n self.shared_decoders.middle_decoder.load_state_dict(middle_dict)\n self.shared_decoders.fine_decoder.load_state_dict(fine_dict)\n\n # 分层特征网格初始化\n def grid_init(self, cfg):\n \"\"\"\n Initialize the hierarchical feature grids.\n\n Args:\n cfg (dict): parsed config dict.\n \"\"\"\n # 各项grid_len参数设置见yaml里的值\n if self.coarse:\n coarse_grid_len = cfg['grid_len']['coarse']\n self.coarse_grid_len = 
coarse_grid_len\n middle_grid_len = cfg['grid_len']['middle']\n self.middle_grid_len = middle_grid_len\n fine_grid_len = cfg['grid_len']['fine']\n self.fine_grid_len = fine_grid_len\n color_grid_len = cfg['grid_len']['color']\n self.color_grid_len = color_grid_len\n\n c = {}\n # 特征向量维度c_dim和场景边界xyz_len\n c_dim = cfg['model']['c_dim']\n xyz_len = self.bound[:, 1]-self.bound[:, 0]\n\n # If you have questions regarding the swap of axis 0 and 2,\n # please refer to https://github.com/cvg/nice-slam/issues/24\n\n if self.coarse:\n coarse_key = 'grid_coarse'\n coarse_val_shape = list(\n map(int, (xyz_len*self.coarse_bound_enlarge/coarse_grid_len).tolist()))\n coarse_val_shape[0], coarse_val_shape[2] = coarse_val_shape[2], coarse_val_shape[0]\n self.coarse_val_shape = coarse_val_shape\n val_shape = [1, c_dim, *coarse_val_shape]\n # 初始化一个具有特定形状和尺寸的零张量,并用标准正态分布填充,mid fine color同理;标准正态分布是深度学习中常见的权重初始化方法,有助于模型的训练和收敛\n coarse_val = torch.zeros(val_shape).normal_(mean=0, std=0.01)\n c[coarse_key] = coarse_val\n \n middle_key = 'grid_middle'\n middle_val_shape = list(map(int, (xyz_len/middle_grid_len).tolist()))\n middle_val_shape[0], middle_val_shape[2] = middle_val_shape[2], middle_val_shape[0]\n self.middle_val_shape = middle_val_shape\n val_shape = [1, c_dim, *middle_val_shape]\n middle_val = torch.zeros(val_shape).normal_(mean=0, std=0.01)\n c[middle_key] = middle_val\n\n fine_key = 'grid_fine'\n fine_val_shape = list(map(int, (xyz_len/fine_grid_len).tolist()))\n fine_val_shape[0], fine_val_shape[2] = fine_val_shape[2], fine_val_shape[0]\n self.fine_val_shape = fine_val_shape\n val_shape = [1, c_dim, *fine_val_shape]\n fine_val = torch.zeros(val_shape).normal_(mean=0, std=0.0001) # 精细网格使用更小的标准差进行初始化\n c[fine_key] = fine_val\n\n color_key = 'grid_color'\n color_val_shape = list(map(int, (xyz_len/color_grid_len).tolist()))\n color_val_shape[0], color_val_shape[2] = color_val_shape[2], color_val_shape[0]\n self.color_val_shape = color_val_shape\n val_shape = [1, c_dim, *color_val_shape]\n color_val = torch.zeros(val_shape).normal_(mean=0, std=0.01)\n c[color_key] = color_val\n\n # 所有初始化的网格(粗糙、中等、精细、颜色)被存储在一个字典 c 中,每个网格对应一个键(例如,'grid_coarse', 'grid_middle' 等)\n # 这个字典随后被赋值给 self.shared_c,使得这些网格可以在整个类的其他方法中被共享和访问\n self.shared_c = c\n\n def tracking(self, rank):\n \"\"\"\n Tracking Thread.\n\n Args:\n rank (int): Thread ID.\n \"\"\"\n\n # should wait until the mapping of first frame is finished\n # 一定要进行了初始化、确定了世界坐标系,才能够进行Tracking;\n # 而NICE-SLAM这样的NeRF based SLAM初始化的办法就是把第一帧图像拍摄的相机位置作为世界坐标系原点,然后先建图再去跟踪;\n # 在Tracking中,只优化camera pose,不优化hierarchical scene representation\n while (1):\n if self.mapping_first_frame[0] == 1:\n break\n time.sleep(1)\n\n self.tracker.run()\n\n def mapping(self, rank):\n \"\"\"\n Mapping Thread. (updates middle, fine, and color level)\n\n Args:\n rank (int): Thread ID.\n \"\"\"\n\n self.mapper.run()\n\n def coarse_mapping(self, rank):\n \"\"\"\n Coarse mapping Thread. (updates coarse level)\n\n Args:\n rank (int): Thread ID.\n \"\"\"\n\n self.coarse_mapper.run()\n\n def run(self):\n \"\"\"\n Dispatch Threads.\n \"\"\"\n\n processes = []\n for rank in range(3):\n # 当 rank 为 0 时,创建一个tracking进程;为1时,创建一个mapping进程;为2时,进行self.coarse的判断,通过则执行coarse_mapping线程\n if rank == 0:\n p = mp.Process(target=self.tracking, args=(rank, ))\n elif rank == 1:\n p = mp.Process(target=self.mapping, args=(rank, ))\n elif rank == 2:\n if self.coarse:\n p = mp.Process(target=self.coarse_mapping, args=(rank, ))\n else:\n continue\n p.start()\n processes.append(p)\n for p in processes:\n p.join()"
}
] | import argparse
import random
import numpy as np
import torch
from src import config
from src.NICE_SLAM import NICE_SLAM | 4,419 |
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def main():
# setup_seed(20)
parser = argparse.ArgumentParser(
description='Arguments for running the NICE-SLAM/iMAP*.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--input_folder', type=str,
                        help='input folder, this has higher priority and overrides the one in the config file')
parser.add_argument('--output', type=str,
                        help='output folder, this has higher priority and overrides the one in the config file')
nice_parser = parser.add_mutually_exclusive_group(required=False)
nice_parser.add_argument('--nice', dest='nice', action='store_true')
nice_parser.add_argument('--imap', dest='nice', action='store_false')
parser.set_defaults(nice=True)
    # parse_args() reads all arguments passed on the command line
args = parser.parse_args()
|
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def main():
# setup_seed(20)
parser = argparse.ArgumentParser(
description='Arguments for running the NICE-SLAM/iMAP*.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--input_folder', type=str,
                        help='input folder, this has higher priority and overrides the one in the config file')
parser.add_argument('--output', type=str,
                        help='output folder, this has higher priority and overrides the one in the config file')
nice_parser = parser.add_mutually_exclusive_group(required=False)
nice_parser.add_argument('--nice', dest='nice', action='store_true')
nice_parser.add_argument('--imap', dest='nice', action='store_false')
parser.set_defaults(nice=True)
    # parse_args() reads all arguments passed on the command line
args = parser.parse_args()
| cfg = config.load_config( | 0 | 2023-11-07 05:09:36+00:00 | 8k |
sb-ai-lab/HypEx | hypex/algorithms/faiss_matcher.py | [
{
"identifier": "check_repeats",
"path": "hypex/utils/metrics.py",
"snippet": "def check_repeats(index: np.array, silent: bool = False) -> float:\n \"\"\"Checks the fraction of duplicated indexes in the given array.\n\n Args:\n index:\n The array of indexes to check for duplicates\n silent:\n If silent, logger in info mode\n\n Returns:\n The fraction of duplicated index\n \"\"\"\n unique, counts = np.unique(index, return_counts=True)\n rep_frac = len(unique) / len(index) if len(unique) > 0 else 0\n\n if silent:\n logger.debug(f\"Fraction of duplicated indexes: {rep_frac: .2f}\")\n else:\n logger.info(f\"Fraction of duplicated indexes: {rep_frac: .2f}\")\n\n return round(rep_frac, 2)"
},
{
"identifier": "matching_quality",
"path": "hypex/utils/metrics.py",
"snippet": "def matching_quality(\n data: pd.DataFrame, treatment: str, features: list, features_psi: list, silent: bool = False\n) -> tuple:\n \"\"\"Wraps the functionality for estimating matching quality.\n\n Args:\n data:\n The dataframe of matched data\n treatment:\n The column determining control and test groups\n features:\n The list of features, ks-test and smd accept only numeric values\n features_psi:\n The list of features for calculating Population Stability Index (PSI)\n silent:\n If silent, logger in info mode\n\n\n Returns:\n A tuple of dataframes with estimated metrics for matched treated to control and control to treated\n\n \"\"\"\n orig_treated = data[data[treatment] == 1][features]\n orig_untreated = data[data[treatment] == 0][features]\n matched_treated = data[data[treatment] == 1][sorted([f + \"_matched\" for f in features])]\n matched_treated.columns = list(map(lambda x: x.replace(\"_matched\", \"\"), matched_treated.columns))\n matched_untreated = data[data[treatment] == 0][sorted([f + \"_matched\" for f in features])]\n matched_untreated.columns = list(map(lambda x: x.replace(\"_matched\", \"\"), matched_untreated.columns))\n\n psi_treated = data[data[treatment] == 1][features_psi]\n psi_treated_matched = data[data[treatment] == 1][[f + \"_matched\" for f in features_psi]]\n psi_treated_matched.columns = [f + \"_treated\" for f in features_psi]\n psi_treated.columns = [f + \"_treated\" for f in features_psi]\n\n psi_untreated = data[data[treatment] == 0][features_psi]\n psi_untreated_matched = data[data[treatment] == 0][[f + \"_matched\" for f in features_psi]]\n psi_untreated.columns = [f + \"_untreated\" for f in features_psi]\n psi_untreated_matched.columns = [f + \"_untreated\" for f in features_psi]\n\n treated_smd_data = smd(orig_treated, matched_treated, silent)\n untreated_smd_data = smd(orig_untreated, matched_untreated, silent)\n smd_data = pd.concat([treated_smd_data, untreated_smd_data], axis=1)\n smd_data.columns = [\"match_control_to_treat\", \"match_treat_to_control\"]\n\n treated_ks = ks(orig_treated, matched_treated, silent)\n untreated_ks = ks(orig_untreated, matched_untreated, silent)\n ks_dict = {k: [treated_ks[k], untreated_ks[k]] for k in treated_ks.keys()}\n ks_df = pd.DataFrame(data=ks_dict, index=range(2)).T\n ks_df.columns = [\"match_control_to_treat\", \"match_treat_to_control\"]\n\n report_cols = [\"column\", \"anomaly_score\", \"check_result\"]\n report_psi_treated = report(psi_treated, psi_treated_matched, silent=silent)[report_cols]\n report_psi_treated.columns = [col + \"_treated\" for col in report_cols]\n report_psi_untreated = report(psi_untreated, psi_untreated_matched, silent=silent)[report_cols]\n report_psi_untreated.columns = [col + \"_untreated\" for col in report_cols]\n report_psi = pd.concat(\n [report_psi_treated.reset_index(drop=True), report_psi_untreated.reset_index(drop=True)], axis=1\n )\n\n return report_psi, ks_df, smd_data"
}
] | import datetime as dt
import functools
import logging
import time
import faiss
import numpy as np
import pandas as pd
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Union
from scipy.stats import norm
from tqdm.auto import tqdm
from ..utils.metrics import check_repeats
from ..utils.metrics import matching_quality | 3,896 | df_pred_treated = self._create_outcome_matched_df(self.dict_outcome_treated, True)
df_pred_untreated = self._create_outcome_matched_df(self.dict_outcome_untreated, False)
df_matched = pd.concat([df_pred_treated, df_pred_untreated])
treated_x = self._create_features_matched_df(self.treated_index, True)
untreated_x = self._create_features_matched_df(self.untreated_index, False)
untreated_x = pd.concat([treated_x, untreated_x])
columns = list(untreated_x.columns) + list(df_matched.columns)
df_matched = pd.concat([untreated_x, df_matched], axis=1, ignore_index=True)
df_matched.columns = columns
return df_matched
def calc_atc(self, df: pd.DataFrame, outcome: str) -> tuple:
"""Calculates Average Treatment Effect for the control group (ATC).
        The effect on the control group had it been treated.
Args:
df:
Input dataframe
outcome:
The outcome to be considered for treatment effect
Returns:
Contains ATC, scaled counts, and variances as numpy arrays
"""
logger.debug("Calculating ATC")
df = df[df[self.treatment] == 0]
N_c = len(df)
ITT_c = df[outcome + POSTFIX_BIAS]
scaled_counts_c = scaled_counts(N_c, self.treated_index, self.silent)
vars_c = np.repeat(ITT_c.var(), N_c) # conservative
atc = ITT_c.mean()
return atc, scaled_counts_c, vars_c
def calc_att(self, df: pd.DataFrame, outcome: str) -> tuple:
"""Calculates Average Treatment Effect for the treated (ATT).
Args:
df:
Input dataframe
outcome:
The outcome to be considered for treatment effect
Returns:
Contains ATT, scaled counts, and variances as numpy arrays
"""
logger.debug("Calculating ATT")
df = df[df[self.treatment] == 1]
N_t = len(df)
ITT_t = df[outcome + POSTFIX_BIAS]
scaled_counts_t = scaled_counts(N_t, self.untreated_index, self.silent)
vars_t = np.repeat(ITT_t.var(), N_t) # conservative
att = ITT_t.mean()
return att, scaled_counts_t, vars_t
def _calculate_ate_all_target(self, df: pd.DataFrame):
"""Creates dictionaries of all effect: ATE, ATC, ATT.
Args:
df:
Input dataframe
"""
logger.debug("Creating dicts of all effects: ATE, ATC, ATT")
att_dict = {}
atc_dict = {}
ate_dict = {}
N = len(df)
N_t = df[self.treatment].sum()
N_c = N - N_t
for outcome in self.outcomes:
att, scaled_counts_t, vars_t = self.calc_att(df, outcome)
atc, scaled_counts_c, vars_c = self.calc_atc(df, outcome)
ate = (N_c / N) * atc + (N_t / N) * att
att_se = calc_att_se(vars_c, vars_t, scaled_counts_c)
atc_se = calc_atc_se(vars_c, vars_t, scaled_counts_t)
ate_se = calc_ate_se(vars_c, vars_t, scaled_counts_c, scaled_counts_t)
ate_dict[outcome] = [
ate,
ate_se,
pval_calc(ate / ate_se),
ate - self.sigma * ate_se,
ate + self.sigma * ate_se,
]
atc_dict[outcome] = [
atc,
atc_se,
pval_calc(atc / atc_se),
atc - self.sigma * atc_se,
atc + self.sigma * atc_se,
]
att_dict[outcome] = [
att,
att_se,
pval_calc(att / att_se),
att - self.sigma * att_se,
att + self.sigma * att_se,
]
self.ATE, self.ATC, self.ATT = ate_dict, atc_dict, att_dict
self.val_dict = ate_dict
| """Class that searches indexes."""
def timer(func):
"""Decorator to measure the execution time of a function.
Uses time.perf_counter() to determine the start and end times
of the decorated function and then prints the total execution time
Usage Example:
@timer
def example_function():
...
Args:
func: The function whose execution time is to be measured
Returns:
Wrapped version of the original function with added time measurement
"""
@functools.wraps(func)
def _wrapper(*args, **kwargs):
start = time.perf_counter()
result = func(*args, **kwargs)
runtime = time.perf_counter() - start
print(f"{func.__name__} took {runtime:.4f} secs")
return result
return _wrapper
faiss.cvar.distance_compute_blas_threshold = 100000
POSTFIX = "_matched"
POSTFIX_BIAS = "_matched_bias"
logger = logging.getLogger("Faiss hypex")
console_out = logging.StreamHandler()
logging.basicConfig(
handlers=(console_out,),
format="[%(asctime)s | %(name)s | %(levelname)s]: %(message)s",
datefmt="%d.%m.%Y %H:%M:%S",
level=logging.INFO,
)
class FaissMatcher:
"""A class used to match instances using Faiss library."""
def __init__(
self,
df: pd.DataFrame,
outcomes: str,
treatment: str,
info_col: list,
features: [list, pd.DataFrame] = None,
group_col: str = None,
weights: dict = None,
sigma: float = 1.96,
validation: bool = None,
n_neighbors: int = 10,
silent: bool = True,
pbar: bool = True,
):
"""Construct all the necessary attributes.
Args:
df:
The input dataframe
outcomes:
The target column name
treatment:
The column name with treatment
info_col:
A list with informational column names
features:
                A list of feature names used for matching. Defaults to None
group_col:
The column for stratification. Defaults to None
weights:
                Dict with weights of features for matching. Use it if you want matching to rely
                more on one feature and less on another
sigma:
                The significance level for confidence interval calculation. Defaults to 1.96
validation:
The flag for validation of estimated ATE with default method `random_feature`
n_neighbors:
The number of neighbors to find for each object. Defaults to 10
silent:
Write logs in debug mode
pbar:
                Display a progress bar while getting the index
"""
self.n_neighbors = n_neighbors
if group_col is None:
self.df = df
else:
self.df = df.sort_values([treatment, group_col])
self.columns_del = [outcomes]
if info_col:
self.info_col = info_col
else:
self.info_col = []
if self.info_col is not None:
self.columns_del = self.columns_del + [x for x in self.info_col if x in self.df.columns]
self.outcomes = outcomes if type(outcomes) == list else [outcomes]
self.treatment = treatment
if features is None:
self.columns_match = list(
set([x for x in list(self.df.columns) if x not in self.info_col] + [self.treatment] + self.outcomes)
)
else:
try:
self.columns_match = features["Feature"].tolist() + [self.treatment] + self.outcomes
except TypeError:
self.columns_match = features + [self.treatment] + self.outcomes
self.features_quality = (
self.df.drop(columns=[self.treatment] + self.outcomes + self.info_col)
.select_dtypes(include=["int16", "int32", "int64", "float16", "float32", "float64"])
.columns
)
self.dict_outcome_untreated = {}
self.dict_outcome_treated = {}
self.group_col = group_col
self.weights = weights
self.treated_index = None
self.untreated_index = None
self.orig_treated_index = None
self.orig_untreated_index = None
self.results = {}
self.ATE = None
self.sigma = sigma
self.quality_dict = {}
self.rep_dict = None
self.validation = validation
self.silent = silent
self.pbar = pbar
self.tqdm = None
self.results = pd.DataFrame()
def __getstate__(self) -> dict:
"""Prepare the object for serialization.
This method is called when the object is about to be serialized.
It removes the `tqdm` attribute from the object's dictionary
because `tqdm` objects cannot be serialized.
Returns:
A copy of the object's dictionary with the `tqdm` attribute removed.
"""
state = self.__dict__.copy()
if "tqdm" in state:
del state["tqdm"]
return state
def __setstate__(self, state: dict):
"""Restore the object after deserialization.
This method is called when the object is deserialized.
It adds the `tqdm` attribute back to the object's dictionary
if the `pbar` attribute is True.
Args:
state:
The deserialized state of the object
"""
if "pbar" in state and state["pbar"]:
state["tqdm"] = None
self.__dict__.update(state)
def _get_split(self, df: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):
"""Creates split data by treatment column.
        Separates rows by the treatment column into 1 (treated) and 0 (untreated) groups
        and drops the outcome and treatment columns.
Args:
df:
The input dataframe
Returns:
Tuple of dataframes - one for treated (df[self.treatment] == 1]) and
one for untreated (df[self.treatment] == 0]). Drops self.outcomes and
`self.treatment` columns
"""
logger.debug("Creating split data by treatment column")
treated = df[df[self.treatment] == 1].drop([self.treatment] + self.outcomes, axis=1)
untreated = df[df[self.treatment] == 0].drop([self.treatment] + self.outcomes, axis=1)
return treated, untreated
def _predict_outcome(self, std_treated: pd.DataFrame, std_untreated: pd.DataFrame):
"""Applies LinearRegression to input arrays.
Calculate biases of treated and untreated values,
creates dict of y - regular, matched and without bias.
Args:
std_treated:
The dataframe of treated data
std_untreated:
The dataframe of untreated data
"""
logger.debug("Predicting target by Linear Regression")
start_time = dt.datetime.now()
logger.debug("start --")
self.dict_outcome_untreated = {}
self.dict_outcome_treated = {}
df = self.df.drop(columns=self.info_col)
for outcome in self.outcomes:
y_untreated = df[df[self.treatment] == 0][outcome].to_numpy()
y_treated = df[df[self.treatment] == 1][outcome].to_numpy()
x_treated = std_treated.to_numpy()
x_untreated = std_untreated.to_numpy()
y_match_treated = np.array([y_untreated[idx].mean() for idx in self.treated_index])
y_match_untreated = np.array([y_treated[idx].mean() for idx in self.untreated_index])
x_match_treated = np.array([x_untreated[idx].mean(0) for idx in self.treated_index])
x_match_untreated = np.array([x_treated[idx].mean(0) for idx in self.untreated_index])
bias_coefs_c = bias_coefs(self.untreated_index, y_treated, x_treated)
bias_coefs_t = bias_coefs(self.treated_index, y_untreated, x_untreated)
bias_c = bias(x_untreated, x_match_untreated, bias_coefs_c)
bias_t = bias(x_treated, x_match_treated, bias_coefs_t)
y_match_treated_bias = y_treated - y_match_treated + bias_t
y_match_untreated_bias = y_match_untreated - y_untreated - bias_c
self.dict_outcome_untreated[outcome] = y_untreated
self.dict_outcome_untreated[outcome + POSTFIX] = y_match_untreated
self.dict_outcome_untreated[outcome + POSTFIX_BIAS] = y_match_untreated_bias
self.dict_outcome_treated[outcome] = y_treated
self.dict_outcome_treated[outcome + POSTFIX] = y_match_treated
self.dict_outcome_treated[outcome + POSTFIX_BIAS] = y_match_treated_bias
end_time = dt.datetime.now()
total = dt.datetime.strptime(str(end_time - start_time), "%H:%M:%S.%f").strftime("%H:%M:%S")
logger.debug(f"end -- [work time{total}]")
def _create_outcome_matched_df(self, dict_outcome: dict, is_treated: bool) -> pd.DataFrame:
"""Creates dataframe with outcomes values and treatment.
Args:
dict_outcome:
A dictionary containing outcomes
is_treated:
A boolean value indicating whether the outcome is treated or not
Returns:
A dataframe with matched outcome and treatment columns
"""
df_pred = pd.DataFrame(dict_outcome)
df_pred[self.treatment] = int(is_treated)
df_pred[self.treatment + POSTFIX] = int(not is_treated)
return df_pred
def _create_features_matched_df(self, index: np.ndarray, is_treated: bool) -> pd.DataFrame:
"""Creates matched dataframe with features.
Args:
index:
An array of indices
is_treated:
A boolean value indicating whether the outcome is treated or not
Returns:
A dataframe of matched features
"""
df = self.df.drop(columns=self.outcomes + self.info_col)
if self.group_col is None:
untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()
converted_index = [untreated_index[i] for i in index]
filtered = df.loc[df[self.treatment] == int(not is_treated)].values
untreated_df = pd.DataFrame(
data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=df.columns
            ) # TODO: add the date to the data and fix the related bugs (it used to break here)
if self.info_col is not None and len(self.info_col) != 1:
untreated_df["index"] = pd.Series(converted_index)
treated_df = df[df[self.treatment] == int(is_treated)].reset_index()
else:
ids = self.df[df[self.treatment] == int(not is_treated)][self.info_col].values.ravel()
converted_index = [ids[i] for i in index]
untreated_df["index"] = pd.Series(converted_index)
treated_df = df[df[self.treatment] == int(is_treated)].reset_index()
treated_df["index"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()
else:
df = df.sort_values([self.treatment, self.group_col])
untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()
converted_index = [untreated_index[i] for i in index]
filtered = df.loc[df[self.treatment] == int(not is_treated)]
cols_untreated = [col for col in filtered.columns if col != self.group_col]
filtered = filtered.drop(columns=self.group_col).to_numpy()
untreated_df = pd.DataFrame(
data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=cols_untreated
)
treated_df = df[df[self.treatment] == int(is_treated)].reset_index()
grp = treated_df[self.group_col]
untreated_df[self.group_col] = grp
if self.info_col is not None and len(self.info_col) != 1:
untreated_df["index"] = pd.Series(converted_index)
else:
ids = (
self.df[df[self.treatment] == int(not is_treated)]
.sort_values([self.treatment, self.group_col])[self.info_col]
.values.ravel()
)
converted_index = [ids[i] for i in index]
untreated_df["index"] = pd.Series(converted_index)
treated_df["index"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()
untreated_df.columns = [col + POSTFIX for col in untreated_df.columns]
x = pd.concat([treated_df, untreated_df], axis=1).drop(
columns=[self.treatment, self.treatment + POSTFIX], axis=1
)
return x
def _create_matched_df(self) -> pd.DataFrame:
"""Creates matched df of features and outcome.
Returns:
Matched dataframe
"""
df_pred_treated = self._create_outcome_matched_df(self.dict_outcome_treated, True)
df_pred_untreated = self._create_outcome_matched_df(self.dict_outcome_untreated, False)
df_matched = pd.concat([df_pred_treated, df_pred_untreated])
treated_x = self._create_features_matched_df(self.treated_index, True)
untreated_x = self._create_features_matched_df(self.untreated_index, False)
untreated_x = pd.concat([treated_x, untreated_x])
columns = list(untreated_x.columns) + list(df_matched.columns)
df_matched = pd.concat([untreated_x, df_matched], axis=1, ignore_index=True)
df_matched.columns = columns
return df_matched
def calc_atc(self, df: pd.DataFrame, outcome: str) -> tuple:
"""Calculates Average Treatment Effect for the control group (ATC).
        The effect on the control group had it been treated.
Args:
df:
Input dataframe
outcome:
The outcome to be considered for treatment effect
Returns:
Contains ATC, scaled counts, and variances as numpy arrays
"""
logger.debug("Calculating ATC")
df = df[df[self.treatment] == 0]
N_c = len(df)
ITT_c = df[outcome + POSTFIX_BIAS]
scaled_counts_c = scaled_counts(N_c, self.treated_index, self.silent)
vars_c = np.repeat(ITT_c.var(), N_c) # conservative
atc = ITT_c.mean()
return atc, scaled_counts_c, vars_c
def calc_att(self, df: pd.DataFrame, outcome: str) -> tuple:
"""Calculates Average Treatment Effect for the treated (ATT).
Args:
df:
Input dataframe
outcome:
The outcome to be considered for treatment effect
Returns:
Contains ATT, scaled counts, and variances as numpy arrays
"""
logger.debug("Calculating ATT")
df = df[df[self.treatment] == 1]
N_t = len(df)
ITT_t = df[outcome + POSTFIX_BIAS]
scaled_counts_t = scaled_counts(N_t, self.untreated_index, self.silent)
vars_t = np.repeat(ITT_t.var(), N_t) # conservative
att = ITT_t.mean()
return att, scaled_counts_t, vars_t
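    # --- Illustrative sketch (not part of the original class) ---
    # ATE is the sample-size-weighted combination of the ATC and ATT values returned by the two
    # methods above; _calculate_ate_all_target below uses ATE = (N_c / N) * ATC + (N_t / N) * ATT.
    # The numbers and the helper name here are invented for illustration only.
    @staticmethod
    def _ate_decomposition_sketch(atc: float = 0.5, att: float = 0.8, n_c: int = 700, n_t: int = 300) -> float:
        n = n_c + n_t
        return (n_c / n) * atc + (n_t / n) * att  # -> 0.59 for these toy numbers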
def _calculate_ate_all_target(self, df: pd.DataFrame):
"""Creates dictionaries of all effect: ATE, ATC, ATT.
Args:
df:
Input dataframe
"""
logger.debug("Creating dicts of all effects: ATE, ATC, ATT")
att_dict = {}
atc_dict = {}
ate_dict = {}
N = len(df)
N_t = df[self.treatment].sum()
N_c = N - N_t
for outcome in self.outcomes:
att, scaled_counts_t, vars_t = self.calc_att(df, outcome)
atc, scaled_counts_c, vars_c = self.calc_atc(df, outcome)
ate = (N_c / N) * atc + (N_t / N) * att
att_se = calc_att_se(vars_c, vars_t, scaled_counts_c)
atc_se = calc_atc_se(vars_c, vars_t, scaled_counts_t)
ate_se = calc_ate_se(vars_c, vars_t, scaled_counts_c, scaled_counts_t)
ate_dict[outcome] = [
ate,
ate_se,
pval_calc(ate / ate_se),
ate - self.sigma * ate_se,
ate + self.sigma * ate_se,
]
atc_dict[outcome] = [
atc,
atc_se,
pval_calc(atc / atc_se),
atc - self.sigma * atc_se,
atc + self.sigma * atc_se,
]
att_dict[outcome] = [
att,
att_se,
pval_calc(att / att_se),
att - self.sigma * att_se,
att + self.sigma * att_se,
]
self.ATE, self.ATC, self.ATT = ate_dict, atc_dict, att_dict
self.val_dict = ate_dict
| def matching_quality(self, df_matched) -> Dict[str, Union[Dict[str, float], float]]: | 1 | 2023-11-01 08:58:57+00:00 | 8k |
TianrongChen/DMSB | runner.py | [
{
"identifier": "MMD_loss",
"path": "metrics.py",
"snippet": "class MMD_loss(torch.nn.Module):\n '''\n fork from: https://github.com/ZongxianLee/MMD_Loss.Pytorch\n '''\n def __init__(self, kernel_mul = 2.0, kernel_num = 5):\n super(MMD_loss, self).__init__()\n self.kernel_num = kernel_num\n self.kernel_mul = kernel_mul\n self.fix_sigma = None\n return\n def guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):\n n_samples = int(source.size()[0])+int(target.size()[0])\n total = torch.cat([source, target], dim=0)\n\n total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))\n total1 = total.unsqueeze(1).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))\n L2_distance = ((total0-total1)**2).sum(2) \n if fix_sigma:\n bandwidth = fix_sigma\n else:\n bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)\n bandwidth /= kernel_mul ** (kernel_num // 2)\n bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(kernel_num)]\n kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]\n return sum(kernel_val)\n\n def forward(self, source, target):\n batch_size = int(source.size()[0])\n kernels = self.guassian_kernel(source, target, kernel_mul=self.kernel_mul, kernel_num=self.kernel_num, fix_sigma=self.fix_sigma)\n XX = kernels[:batch_size, :batch_size]\n YY = kernels[batch_size:, batch_size:]\n XY = kernels[:batch_size, batch_size:]\n YX = kernels[batch_size:, :batch_size]\n loss = torch.mean(XX + YY - XY -YX)\n return loss"
},
{
"identifier": "compute_metrics",
"path": "metrics.py",
"snippet": "def compute_metrics(opt, pred_traj, ref_data, metrics, runner,stage):\n '''\n pred_traj: [batch_size, interval, data_dim] torch.Tensor\n ref_data: [num_dist, batch_size, data_dim], torch.Tensor, we use whole ref data which is similar to FID computation\n The reference data and prediction are all the marignals. We delete the leave one out (--LOO) marginal during the training, but we still evaluate them during here.\n '''\n sample_size = 1000\n dist_time = np.linspace(0, opt.interval-1, opt.num_dist).astype(int) #we delete a distribution when LOO during training, so num_dist is same as original marginal\n pred_idx = np.random.choice(pred_traj.shape[0], sample_size, replace=False) #random sample from batch\n pred_data = pred_traj[pred_idx][:,dist_time,0:opt.data_dim[0]] # [samp_bs, num_dist, data_dim] \n pred_data = pred_data.transpose(1,0,2)/opt.data_scale # [num_dist, samp_bs, data_dim]\n \n for metric_idx, metric in enumerate(metrics): #loop over metrics\n avg_metric = 0\n for idx,(pred,ref) in enumerate(zip(pred_data, ref_data)):\n if idx==0:\n continue # First marginal does not need to be evaluate. We do not generate it, just ground truth.\n if opt.metrics[metric_idx] == 'MMD': \n ref_idx = np.random.choice(ref.shape[0], sample_size, replace=False)\n ref = torch.Tensor(ref[ref_idx])\n pred = torch.Tensor(pred)\n\n loss = metric(pred,ref)\n avg_metric += loss\n print(util.green('{} for time{} is {}'.format(opt.metrics[metric_idx], idx,loss)))\n runner.log_tb(stage, loss, '{}_t{}'.format(opt.metrics[metric_idx],idx),'SB_forward')\n\n avg_metric = avg_metric/(opt.num_dist-1)\n print('AVERAGE {} IS {}'.format(opt.metrics[metric_idx],avg_metric))\n runner.log_tb(stage, avg_metric, '{}_avg'.format(opt.metrics[metric_idx]), 'SB_forward') \n\n return pred_data"
},
{
"identifier": "metric_build",
"path": "metrics.py",
"snippet": "def metric_build(opt):\n metrics = {\n 'SWD':sliced_wasserstein_distance,\n 'MMD':MMD_loss(),\n 'MWD':max_sliced_wasserstein_distance\n }\n return [metrics.get(key) for key in opt.metrics]"
},
{
"identifier": "compute_sb_DSB_train",
"path": "loss.py",
"snippet": "def compute_sb_DSB_train(opt, label, label_aux,dyn, ts, ms, policy_opt, return_z=False, itr=None):\n \"\"\" Implementation of Eq (18,19) in our main paper.\n \"\"\"\n dt = dyn.dt\n zs = policy_opt(ms,ts)\n g_ts = dyn.g(ts)\n g_ts = g_ts[:,None,None,None] if util.is_image_dataset(opt) else g_ts[:,None]\n loss = torch.nn.functional.mse_loss(g_ts*dt*zs,label)\n return loss, zs if return_z else loss"
}
] | import os, time, gc
import numpy as np
import torch
import torch.nn.functional as F
import policy
import sde
import data
import util
from torch.optim import SGD, RMSprop, Adagrad, AdamW, lr_scheduler, Adam
from torch.utils.tensorboard import SummaryWriter
from torch_ema import ExponentialMovingAverage
from metrics import MMD_loss,compute_metrics,metric_build
from loss import compute_sb_DSB_train
from ipdb import set_trace as debug | 4,071 | def sb_alternate_train_stage(self, opt, stage, epoch, direction, reused_sampler=None, rollout=False, resample=True):
policy_opt, policy_impt = {
            'forward': [self.z_f, self.z_b], # train forward, sample from backward
'backward': [self.z_b, self.z_f], # train backward, sample from forward
}.get(direction)
for ep in range(epoch):
# prepare training data
train_ms, train_zs, train_ts, train_labels = self.sample_train_data(
opt, policy_opt, policy_impt, reused_sampler, rollout=rollout, resample=resample
)
# train one epoch
policy_impt = freeze_policy(policy_impt)
policy_opt = activate_policy(policy_opt)
self.DSB_alternate_train_ep(
opt, ep, stage, direction, train_ms, train_zs, train_ts, train_labels, policy_opt, epoch
)
def DSB_alternate_train_ep(
self, opt, ep, stage, direction, train_xs, train_zs, train_ts, train_labels, policy, num_epoch
):
assert train_xs.shape[0] == opt.samp_bs
assert train_zs.shape[0] == opt.samp_bs
assert direction == policy.direction
optimizer, ema, sched = self.get_optimizer_ema_sched(policy)
use_amp=opt.use_amp
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
for it in range(opt.num_itr):
# -------- sample x_idx and t_idx \in [0, interval] --------
samp_m_idx = torch.randint(opt.samp_bs, (opt.train_bs_x,),device='cpu')
samp_t_idx = util.time_sample(opt.interval, policy.direction, opt.train_bs_t)
if opt.use_arange_t: samp_t_idx = util.time_arange(train_ts.shape[0], policy.direction)
# -------- build sample --------
sign=1 if policy.direction=='forward' else -1
ts = train_ts[samp_t_idx].detach().to(opt.device)
ms = train_xs[samp_m_idx][:, samp_t_idx, ...].to(opt.device)
zs_impt = train_zs[samp_m_idx][:, samp_t_idx+sign, ...].to(opt.device)
train_label = train_labels[samp_m_idx][:, samp_t_idx+sign, ...].to(opt.device)
optimizer.zero_grad(set_to_none=True)
# -------- handle for batch_x and batch_t ---------
# (batch, T, xdim) --> (batch*T, xdim)
ms = util.flatten_dim01(ms)
zs_impt = util.flatten_dim01(zs_impt)
train_label = util.flatten_dim01(train_label)
ts = ts.repeat(opt.train_bs_x)
assert ms.shape[0] == ts.shape[0]
assert zs_impt.shape[0] == ts.shape[0]
# -------- compute loss and backprop --------
with torch.cuda.amp.autocast(enabled=use_amp):
loss, zs = compute_sb_DSB_train(
opt, train_label, zs_impt,self.dyn, ts, ms, policy, return_z=True,itr=it
)
assert not torch.isnan(loss)
scaler.scale(loss).backward()
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_norm(policy.parameters(), opt.grad_clip)
scaler.step(optimizer)
scaler.update()
optimizer.step()
ema.update()
if sched is not None: sched.step()
# -------- logging --------
zs = util.unflatten_dim01(zs, [len(samp_m_idx), len(samp_t_idx)])
zs_impt = zs_impt.reshape(zs.shape)
self.log_sb_alternate_train(
opt, it, ep, stage, loss, optimizer, direction, num_epoch
)
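    # --- Illustrative sketch (not part of the original class) ---
    # The "(batch, T, xdim) --> (batch*T, xdim)" comment above relies on util.flatten_dim01;
    # a minimal equivalent consistent with that comment, assuming a torch.Tensor input:
    @staticmethod
    def _flatten_dim01_sketch(x):
        # merge the batch and time axes, keep any remaining feature axes untouched
        return x.reshape(x.shape[0] * x.shape[1], *x.shape[2:])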
@torch.no_grad()
def evaluate(self, opt, stage, rollout=None, resample=False, ode_samp=False):
corrector = (lambda x,t: self.z_f(x,t) + self.z_b(x,t)) if opt.use_corrector else None
ODE_drift = (lambda x,t: 0.5*(self.z_b(x,t) - self.z_f(x,t))) if ode_samp else None
snapshot, ckpt = util.evaluate_stage(opt, stage)
snapshot=True
if ckpt:
self.v_dists = self.dyn.prev_v_boundary
keys = ['z_f','optimizer_f','ema_f','z_b','optimizer_b','ema_b','v_dists']
util.save_checkpoint(opt, self, keys, stage)
if snapshot:
            print(util.blue('======Plotting visualization image======'))
for z in [self.z_b, self.z_f]:
z = freeze_policy(z)
ms, _, _, _,_ = self.dyn.sample_traj(
self.ts,
z,
save_traj=True,
corrector=corrector,
rollout=rollout,
resample=resample,
test=True,
ode_drift= ODE_drift
)
fn = "{}/xs-stage{}-{}".format(z.direction, stage,z.direction)
if opt.problem_name =='semicircle':
util.save_toy_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name == 'petal':
util.save_petal_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name =='gmm':
util.save_toy_seg_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name =='RNAsc' and z.direction=='forward':
|
def build_optimizer_ema_sched(opt, policy):
direction = policy.direction
optim_name = {
'Adam': Adam,
'AdamW': AdamW,
'Adagrad': Adagrad,
'RMSprop': RMSprop,
'SGD': SGD,
}.get(opt.optimizer)
optim_dict = {
"lr": opt.lr_f if direction=='forward' else opt.lr_b,
'weight_decay':opt.l2_norm,
}
if opt.optimizer == 'SGD':
optim_dict['momentum'] = 0.9
optimizer = optim_name(policy.parameters(), **optim_dict)
ema = ExponentialMovingAverage(policy.parameters(), decay=0.999)
if opt.lr_gamma < 1.0:
sched = lr_scheduler.StepLR(optimizer, step_size=opt.lr_step, gamma=opt.lr_gamma)
else:
sched = None
return optimizer, ema, sched
def freeze_policy(policy):
for p in policy.parameters():
p.requires_grad = False
policy.eval()
return policy
def activate_policy(policy):
for p in policy.parameters():
p.requires_grad = True
policy.train()
return policy
class Runner():
def __init__(self,opt):
super(Runner,self).__init__()
self.start_time = time.time()
self.ts = torch.linspace(opt.t0, opt.T, opt.interval)
self.x_dists = data.build(opt)
# for visualize training data
if opt.problem_name == 'petal' or opt.problem_name =='RNAsc':
self.x_data = [dist.ground_truth for dist in self.x_dists]
#Initialize velocity, all gaussian
self.v_dists = {dist:opt.v_scale*torch.randn(opt.samp_bs, *opt.data_dim) for dist in range(len(self.x_dists))}
# Build metrics
self.metrics = metric_build(opt)
# build dynamics, forward (z_f) and backward (z_b) policies and corresponding optimizer
self.dyn = sde.build(opt, self.x_dists, self.v_dists)
self.z_f = policy.build(opt, self.dyn, 'forward') # p -> q
self.z_b = policy.build(opt, self.dyn, 'backward') # q -> p
self.optimizer_f, self.ema_f, self.sched_f = build_optimizer_ema_sched(opt, self.z_f)
self.optimizer_b, self.ema_b, self.sched_b = build_optimizer_ema_sched(opt, self.z_b)
if opt.load:
util.restore_checkpoint(opt, self, opt.load)
self.dyn.prev_v_boundary = self.v_dists
# tensorboard related things
if opt.log_tb:
self.it_f = 0
self.it_b = 0
self.writer =SummaryWriter(
log_dir =os.path.join('runs', opt.dir)
)
def update_count(self, direction):
if direction == 'forward':
self.it_f += 1
return self.it_f
elif direction == 'backward':
self.it_b += 1
return self.it_b
else:
raise RuntimeError()
def get_optimizer_ema_sched(self, z):
if z == self.z_f:
return self.optimizer_f, self.ema_f, self.sched_f
elif z == self.z_b:
return self.optimizer_b, self.ema_b, self.sched_b
else:
raise RuntimeError()
@torch.no_grad()
def sample_train_data(self, opt, policy_opt, policy_impt, reused_sampler, rollout=None, resample=None):
# reuse or sample training ms and zs
try:
reused_traj = next(reused_sampler)
train_ms, train_zs = reused_traj[:,0,...], reused_traj[:,1,...]
            print('generate train data from [{}]!'.format(util.green('reused sampler')))
except:
_, ema, _ = self.get_optimizer_ema_sched(policy_opt)
_, ema_impt, _ = self.get_optimizer_ema_sched(policy_impt)
with ema.average_parameters(), ema_impt.average_parameters():
policy_impt = freeze_policy(policy_impt)
policy_opt = freeze_policy(policy_opt)
corrector = (lambda x,t: policy_impt(x,t) + policy_opt(x,t)) if opt.use_corrector else None
ms, zs, _, labels, ts = self.dyn.sample_traj(self.ts, policy_impt, corrector=corrector, rollout=rollout, resample=resample)
train_ms = ms.detach().cpu(); del ms
train_zs = zs.detach().cpu(); del zs
train_labels = labels.detach().cpu(); del labels
train_ts = ts.detach().cpu(); del ts
print('generate train data from [{}]!'.format(util.red('sampling')))
assert train_ms.shape[0] == opt.samp_bs
assert train_ms.shape[1] == len(train_ts)
gc.collect()
return train_ms, train_zs, train_ts, train_labels
def sb_alternate_train(self, opt):
reused_sampler = self.evaluate(opt, 0, rollout = [0,opt.num_dist-1], resample=False,ode_samp=False)
bridge_ep = boundry_ep = opt.num_epoch
        if opt.problem_name =='petal': bridge_ep = 1 # Special handling for petal: the distributions are too close together.
for stage in range(opt.num_stage):
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'backward', rollout = [0,opt.num_dist-1], resample=True # train backward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'forward', rollout = [0,opt.num_dist-1], resample=True # train forward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, bridge_ep, 'backward', rollout = [0,opt.num_dist-1], resample=False #Train K bridge backward
)
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'forward', rollout = [0,opt.num_dist-1], resample=True #Train forward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'backward', rollout = [0,opt.num_dist-1], resample=True #Train backward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, bridge_ep, 'forward', rollout = [0,opt.num_dist-1], resample=False #Train K bridge forward
)
reused_sampler = self.evaluate(opt, stage+1, rollout = [0,opt.num_dist-1],resample=False)
if opt.log_tb: self.writer.close()
def sb_alternate_train_stage(self, opt, stage, epoch, direction, reused_sampler=None, rollout=False, resample=True):
policy_opt, policy_impt = {
'forward': [self.z_f, self.z_b], # train forwad, sample from backward
'backward': [self.z_b, self.z_f], # train backward, sample from forward
}.get(direction)
for ep in range(epoch):
# prepare training data
train_ms, train_zs, train_ts, train_labels = self.sample_train_data(
opt, policy_opt, policy_impt, reused_sampler, rollout=rollout, resample=resample
)
# train one epoch
policy_impt = freeze_policy(policy_impt)
policy_opt = activate_policy(policy_opt)
self.DSB_alternate_train_ep(
opt, ep, stage, direction, train_ms, train_zs, train_ts, train_labels, policy_opt, epoch
)
def DSB_alternate_train_ep(
self, opt, ep, stage, direction, train_xs, train_zs, train_ts, train_labels, policy, num_epoch
):
assert train_xs.shape[0] == opt.samp_bs
assert train_zs.shape[0] == opt.samp_bs
assert direction == policy.direction
optimizer, ema, sched = self.get_optimizer_ema_sched(policy)
use_amp=opt.use_amp
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
for it in range(opt.num_itr):
# -------- sample x_idx and t_idx \in [0, interval] --------
samp_m_idx = torch.randint(opt.samp_bs, (opt.train_bs_x,),device='cpu')
samp_t_idx = util.time_sample(opt.interval, policy.direction, opt.train_bs_t)
if opt.use_arange_t: samp_t_idx = util.time_arange(train_ts.shape[0], policy.direction)
# -------- build sample --------
sign=1 if policy.direction=='forward' else -1
ts = train_ts[samp_t_idx].detach().to(opt.device)
ms = train_xs[samp_m_idx][:, samp_t_idx, ...].to(opt.device)
zs_impt = train_zs[samp_m_idx][:, samp_t_idx+sign, ...].to(opt.device)
train_label = train_labels[samp_m_idx][:, samp_t_idx+sign, ...].to(opt.device)
optimizer.zero_grad(set_to_none=True)
# -------- handle for batch_x and batch_t ---------
# (batch, T, xdim) --> (batch*T, xdim)
ms = util.flatten_dim01(ms)
zs_impt = util.flatten_dim01(zs_impt)
train_label = util.flatten_dim01(train_label)
ts = ts.repeat(opt.train_bs_x)
assert ms.shape[0] == ts.shape[0]
assert zs_impt.shape[0] == ts.shape[0]
# -------- compute loss and backprop --------
with torch.cuda.amp.autocast(enabled=use_amp):
loss, zs = compute_sb_DSB_train(
opt, train_label, zs_impt,self.dyn, ts, ms, policy, return_z=True,itr=it
)
assert not torch.isnan(loss)
scaler.scale(loss).backward()
if opt.grad_clip is not None:
scaler.unscale_(optimizer) # unscale gradients once before clipping under AMP
torch.nn.utils.clip_grad_norm_(policy.parameters(), opt.grad_clip)
scaler.step(optimizer) # scaler.step already applies the optimizer update when gradients are finite
scaler.update()
ema.update()
if sched is not None: sched.step()
# -------- logging --------
zs = util.unflatten_dim01(zs, [len(samp_m_idx), len(samp_t_idx)])
zs_impt = zs_impt.reshape(zs.shape)
self.log_sb_alternate_train(
opt, it, ep, stage, loss, optimizer, direction, num_epoch
)
@torch.no_grad()
def evaluate(self, opt, stage, rollout=None, resample=False, ode_samp=False):
corrector = (lambda x,t: self.z_f(x,t) + self.z_b(x,t)) if opt.use_corrector else None
ODE_drift = (lambda x,t: 0.5*(self.z_b(x,t) - self.z_f(x,t))) if ode_samp else None
snapshot, ckpt = util.evaluate_stage(opt, stage)
snapshot=True
if ckpt:
self.v_dists = self.dyn.prev_v_boundary
keys = ['z_f','optimizer_f','ema_f','z_b','optimizer_b','ema_b','v_dists']
util.save_checkpoint(opt, self, keys, stage)
if snapshot:
print(util.blue('======Plotting visualization image======'))
for z in [self.z_b, self.z_f]:
z = freeze_policy(z)
ms, _, _, _,_ = self.dyn.sample_traj(
self.ts,
z,
save_traj=True,
corrector=corrector,
rollout=rollout,
resample=resample,
test=True,
ode_drift= ODE_drift
)
fn = "{}/xs-stage{}-{}".format(z.direction, stage,z.direction)
if opt.problem_name =='semicircle':
util.save_toy_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name == 'petal':
util.save_petal_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name =='gmm':
util.save_toy_seg_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name =='RNAsc' and z.direction=='forward': | processed_data = compute_metrics(opt, ms.detach().cpu().numpy(), self.x_data, self.metrics, self, stage) | 1 | 2023-11-05 21:12:37+00:00 | 8k |
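The DSB_alternate_train_ep loop above follows the usual torch.cuda.amp recipe: autocast forward pass, scaled backward pass, gradient clipping, step, update. Below is a minimal self-contained sketch of that pattern, using a hypothetical toy model in place of the policy network; none of the values come from the repository.

import torch

model = torch.nn.Linear(8, 1).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler(enabled=True)

for _ in range(10):
    x, y = torch.randn(32, 8, device='cuda'), torch.randn(32, 1, device='cuda')
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.amp.autocast(enabled=True):
        loss = torch.nn.functional.mse_loss(model(x), y)
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)  # unscale before gradient clipping
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    scaler.step(optimizer)      # applies optimizer.step() only if gradients are finite
    scaler.update()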
mileswyn/SAMIHS | train.py | [
{
"identifier": "get_config",
"path": "utils/config.py",
"snippet": "def get_config(task=\"BCIHM\"):\n if task == \"BCIHM\":\n return Config_BCIHM()\n elif task == \"Instance\":\n return Config_Intance()\n else:\n assert(\"We do not have the related dataset, please choose another task.\")"
},
{
"identifier": "get_eval",
"path": "utils/evaluation.py",
"snippet": "def get_eval(valloader, model, criterion, opt, args):\n if opt.eval_mode == \"mask_slice\":\n return eval_mask_slice2(valloader, model, criterion, opt, args)\n else:\n raise RuntimeError(\"Could not find the eval mode:\", opt.eval_mode)"
},
{
"identifier": "get_model",
"path": "models/model_dict.py",
"snippet": "def get_model(modelname=\"SAM\", args=None, opt=None):\n if modelname == \"SAM\":\n model = sam_model_registry['vit_b'](checkpoint=args.sam_ckpt)\n elif modelname == 'MedSAM':\n model = sam_model_registry['vit_b'](checkpoint=None)\n elif modelname == \"SAMIHS\":\n model = samihs_model_registry['vit_b'](args=args, checkpoint=args.sam_ckpt)\n else:\n raise RuntimeError(\"Could not find the model:\", modelname)\n return model"
},
{
"identifier": "BCIHM",
"path": "utils/data_ihs.py",
"snippet": "class BCIHM(Dataset):\n def __init__(self, dataset_path: str, split='train', joint_transform: Callable = None, fold=0, img_size=256, prompt = \"click\", class_id=1,\n one_hot_mask: int = False) -> None:\n self.fold = fold\n self.dataset_path = dataset_path\n self.one_hot_mask = one_hot_mask\n self.split = split\n id_list_file = os.path.join('./dataset/excel', 'BCIHM.csv')\n df = pd.read_csv(id_list_file, encoding='gbk')\n # id_list_file = os.path.join(dataset_path, 'MainPatient/{0}.txt'.format(split))\n if self.split == 'train':\n self.img_list = [name for id, name in enumerate(df['img']) if df['fold'][id] != self.fold and df['label'][id] > 0] \n self.gt_list = [label for id, label in enumerate(df['gt']) if df['fold'][id] != self.fold and df['label'][id] > 0]\n elif self.split == 'val':\n self.img_list = [name for id, name in enumerate(df['img']) if df['fold'][id] == self.fold]\n self.gt_list = [name for id, name in enumerate(df['gt']) if df['fold'][id] == self.fold]\n elif self.split == 'test':\n self.img_list = [name for id, name in enumerate(df['img']) if df['fold'][id] == self.fold]\n self.gt_list = [name for id, name in enumerate(df['gt']) if df['fold'][id] == self.fold]\n # self.ids = [id_.strip() for id_ in open(id_list_file)]\n self.prompt = prompt\n self.img_size = img_size\n self.class_id = class_id\n self.classes = 2\n if joint_transform:\n self.joint_transform = joint_transform\n else:\n to_tensor = T.ToTensor()\n self.joint_transform = lambda x, y: (to_tensor(x), to_tensor(y))\n\n def __len__(self):\n return len(self.img_list)\n \n def __getitem__(self, i):\n \"\"\"Get the images\"\"\"\n name = self.img_list[i]\n img_path = os.path.join(self.dataset_path, name)\n \n mask_name = self.gt_list[i]\n msk_path = os.path.join(self.dataset_path, mask_name)\n\n image = np.load(img_path)\n mask = np.load(msk_path)\n\n class_id = 1 # fixed since only one class of foreground \n mask[mask > 0] = 1\n\n image = np.clip(image, np.percentile(image, 0.05), np.percentile(image, 99.5)).astype(np.int16)\n mask = mask.astype(np.uint8)\n image, mask = correct_dims(image, mask)\n if self.joint_transform:\n image, mask, low_mask = self.joint_transform(image, mask)\n mask, low_mask = mask.squeeze(0), low_mask.squeeze(0)\n if self.one_hot_mask:\n assert self.one_hot_mask > 0, 'one_hot_mask must be nonnegative'\n mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1)\n\n # --------- make the point prompt ----------\n if self.prompt == 'click':\n point_label = 1\n if 'train' in self.split:\n pt, point_label = random_click(np.array(mask), class_id)\n bbox = random_bbox(np.array(mask), class_id, self.img_size)\n else:\n pt, point_label = fixed_click(np.array(mask), class_id)\n bbox = fixed_bbox(np.array(mask), class_id, self.img_size)\n pt = pt * self.img_size / 512\n mask[mask!=0] = 1\n mask[mask!=1] = 0\n low_mask[low_mask!=0] = 1\n low_mask[low_mask!=1] = 0\n point_labels = np.array(point_label)\n if self.one_hot_mask:\n assert self.one_hot_mask > 0, 'one_hot_mask must be nonnegative'\n mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1)\n\n low_mask = low_mask.unsqueeze(0)\n mask = mask.unsqueeze(0)\n bbox = bbox * self.img_size / 512\n return {\n 'image': image,\n 'label': mask,\n 'p_label': point_labels,\n 'pt': pt,\n 'bbox': bbox,\n 'low_mask':low_mask,\n 'image_name': name.split('/')[-1].split('.')[0] + '.png',\n 'class_id': class_id,\n }"
},
{
"identifier": "Transform2D_BCIHM",
"path": "utils/data_ihs.py",
"snippet": "class Transform2D_BCIHM:\n \"\"\"\n Performs augmentation on image and mask when called. Due to the randomness of augmentation transforms,\n it is not enough to simply apply the same Transform from torchvision on the image and mask separetely.\n Doing this will result in messing up the ground truth mask. To circumvent this problem, this class can\n be used, which will take care of the problems above.\n\n Args:\n crop: tuple describing the size of the random crop. If bool(crop) evaluates to False, no crop will\n be taken.\n p_flip: float, the probability of performing a random horizontal flip.\n color_jitter_params: tuple describing the parameters of torchvision.transforms.ColorJitter.\n If bool(color_jitter_params) evaluates to false, no color jitter transformation will be used.\n p_random_affine: float, the probability of performing a random affine transform using\n torchvision.transforms.RandomAffine.\n long_mask: bool, if True, returns the mask as LongTensor in label-encoded format.\n \"\"\"\n\n def __init__(self, mode='train', img_size=256, low_img_size=256, ori_size=256, crop=(32, 32), p_flip=0.5, p_rota=0.5, p_scale=0.0, p_gaussn=1.0, p_contr=0.0,\n p_gama=0.0, p_distor=0.0, color_jitter_params=(0.1, 0.1, 0.1, 0.1), p_random_affine=0,\n long_mask=False):\n self.mode = mode\n self.crop = crop\n self.p_flip = p_flip\n self.p_rota = p_rota\n self.p_scale = p_scale\n self.p_gaussn = p_gaussn\n self.p_gama = p_gama\n self.p_contr = p_contr\n self.p_distortion = p_distor\n self.img_size = img_size\n self.color_jitter_params = color_jitter_params\n if color_jitter_params:\n self.color_tf = T.ColorJitter(*color_jitter_params)\n self.p_random_affine = p_random_affine\n self.long_mask = long_mask\n self.low_img_size = low_img_size\n self.ori_size = ori_size\n\n def __call__(self, image, mask):\n\n # transforming to tensor\n image, mask = F.to_tensor(image), F.to_tensor(mask)\n\n # if self.mode == 'train':\n # # random horizontal flip\n # if np.random.rand() < self.p_flip:\n # image, mask = F.hflip(image), F.hflip(mask)\n\n # # random rotation\n # if np.random.rand() < self.p_rota:\n # angle = T.RandomRotation.get_params((-30, 30))\n # image, mask = F.rotate(image, angle), F.rotate(mask, angle)\n\n # # random add gaussian noise\n # if np.random.rand() < self.p_gaussn:\n # image, mask = image.cpu().numpy().transpose(1,2,0), image.cpu().numpy().transpose(1,2,0)\n # ns = np.random.randint(3, 15)\n # noise = np.random.normal(loc=0, scale=1, size=(512, 512, 1)) * ns\n # noise = noise.astype(int)\n # image = np.array(image) + noise\n # image, mask = F.to_tensor(image), F.to_tensor(mask)\n\n # else:\n # pass\n\n # transforming to tensor\n image, mask = F.resize(image, (self.img_size, self.img_size), InterpolationMode.BILINEAR), F.resize(mask, (self.ori_size, self.ori_size), InterpolationMode.NEAREST)\n low_mask = F.resize(mask, (self.low_img_size, self.low_img_size), InterpolationMode.NEAREST)\n image = (image - image.min()) / (image.max() - image.min())\n\n return image, mask, low_mask"
},
{
"identifier": "Instance",
"path": "utils/data_ihs.py",
"snippet": "class Instance(Dataset):\n def __init__(self, dataset_path: str, split='train', joint_transform: Callable = None, fold=0, img_size=256, prompt = \"click\", class_id=1,\n one_hot_mask: int = False) -> None:\n self.fold = fold\n self.dataset_path = dataset_path\n self.one_hot_mask = one_hot_mask\n self.split = split\n id_list_file = os.path.join('./dataset/excel', 'Instance.csv')\n df = pd.read_csv(id_list_file, encoding='gbk')\n # id_list_file = os.path.join(dataset_path, 'MainPatient/{0}.txt'.format(split))\n if self.split == 'train':\n self.img_list = [name for id, name in enumerate(df['img']) if df['fold'][id] != self.fold and df['label'][id] > 0] \n self.gt_list = [label for id, label in enumerate(df['gt']) if df['fold'][id] != self.fold and df['label'][id] > 0]\n elif self.split == 'val':\n self.img_list = [name for id, name in enumerate(df['img']) if df['fold'][id] == self.fold]\n self.gt_list = [name for id, name in enumerate(df['gt']) if df['fold'][id] == self.fold]\n elif self.split == 'test':\n self.img_list = [name for id, name in enumerate(df['img']) if df['fold'][id] == self.fold]\n self.gt_list = [name for id, name in enumerate(df['gt']) if df['fold'][id] == self.fold]\n # self.ids = [id_.strip() for id_ in open(id_list_file)]\n self.prompt = prompt\n self.img_size = img_size\n self.class_id = class_id\n self.classes = 2\n if joint_transform:\n self.joint_transform = joint_transform\n else:\n to_tensor = T.ToTensor()\n self.joint_transform = lambda x, y: (to_tensor(x), to_tensor(y))\n\n def __len__(self):\n return len(self.img_list)\n \n def __getitem__(self, i):\n \"\"\"Get the images\"\"\"\n name = self.img_list[i]\n img_path = os.path.join(self.dataset_path, name)\n \n mask_name = self.gt_list[i]\n msk_path = os.path.join(self.dataset_path, mask_name)\n\n image = np.load(img_path)\n mask = np.load(msk_path)\n\n class_id = 1 # fixed since only one class of foreground \n mask[mask > 0] = 1\n\n image = np.clip(image, np.percentile(image, 0.05), np.percentile(image, 99.5)).astype(np.int16)\n mask = mask.astype(np.uint8)\n image, mask = correct_dims(image, mask) \n if self.joint_transform:\n image, mask, low_mask = self.joint_transform(image, mask)\n mask, low_mask = mask.squeeze(0), low_mask.squeeze(0)\n if self.one_hot_mask:\n assert self.one_hot_mask > 0, 'one_hot_mask must be nonnegative'\n mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1)\n\n # --------- make the point prompt -----------------\n if self.prompt == 'click':\n point_label = 1\n if 'train' in self.split:\n pt, point_label = random_click(np.array(mask), class_id)\n bbox = random_bbox(np.array(mask), class_id, self.img_size)\n else:\n pt, point_label = fixed_click(np.array(mask), class_id)\n bbox = fixed_bbox(np.array(mask), class_id, self.img_size)\n pt = pt * self.img_size / 512\n mask[mask!=0] = 1\n mask[mask!=1] = 0\n low_mask[low_mask!=0] = 1\n low_mask[low_mask!=1] = 0\n point_labels = np.array(point_label)\n if self.one_hot_mask:\n assert self.one_hot_mask > 0, 'one_hot_mask must be nonnegative'\n mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1)\n\n low_mask = low_mask.unsqueeze(0)\n mask = mask.unsqueeze(0)\n bbox = bbox * self.img_size / 512\n return {\n 'image': image,\n 'label': mask,\n 'p_label': point_labels,\n 'pt': pt,\n 'bbox': bbox,\n 'low_mask':low_mask,\n 'image_name': name.split('/')[-1].split('.')[0] + '.png',\n 'class_id': class_id,\n }"
},
{
"identifier": "Transform2D_Instance",
"path": "utils/data_ihs.py",
"snippet": "class Transform2D_Instance:\n \"\"\"\n Performs augmentation on image and mask when called. Due to the randomness of augmentation transforms,\n it is not enough to simply apply the same Transform from torchvision on the image and mask separetely.\n Doing this will result in messing up the ground truth mask. To circumvent this problem, this class can\n be used, which will take care of the problems above.\n\n Args:\n crop: tuple describing the size of the random crop. If bool(crop) evaluates to False, no crop will\n be taken.\n p_flip: float, the probability of performing a random horizontal flip.\n color_jitter_params: tuple describing the parameters of torchvision.transforms.ColorJitter.\n If bool(color_jitter_params) evaluates to false, no color jitter transformation will be used.\n p_random_affine: float, the probability of performing a random affine transform using\n torchvision.transforms.RandomAffine.\n long_mask: bool, if True, returns the mask as LongTensor in label-encoded format.\n \"\"\"\n\n def __init__(self, img_size=256, low_img_size=256, ori_size=256, crop=(32, 32), p_flip=0.0, p_rota=0.0, p_scale=0.0, p_gaussn=0.0, p_contr=0.0,\n p_gama=0.0, p_distor=0.0, color_jitter_params=(0.1, 0.1, 0.1, 0.1), p_random_affine=0,\n long_mask=False):\n self.crop = crop\n self.p_flip = p_flip\n self.p_rota = p_rota\n self.p_scale = p_scale\n self.p_gaussn = p_gaussn\n self.p_gama = p_gama\n self.p_contr = p_contr\n self.p_distortion = p_distor\n self.img_size = img_size\n self.color_jitter_params = color_jitter_params\n if color_jitter_params:\n self.color_tf = T.ColorJitter(*color_jitter_params)\n self.p_random_affine = p_random_affine\n self.long_mask = long_mask\n self.low_img_size = low_img_size\n self.ori_size = ori_size\n\n def __call__(self, image, mask):\n\n # transforming to tensor\n image, mask = F.to_tensor(image), F.to_tensor(mask)\n\n # if self.mode == 'train':\n # # random horizontal flip\n # if np.random.rand() < self.p_flip:\n # image, mask = F.hflip(image), F.hflip(mask)\n\n # # random rotation\n # if np.random.rand() < self.p_rota:\n # angle = T.RandomRotation.get_params((-30, 30))\n # image, mask = F.rotate(image, angle), F.rotate(mask, angle)\n\n # # random add gaussian noise\n # if np.random.rand() < self.p_gaussn:\n # image, mask = image.cpu().numpy().transpose(1,2,0), image.cpu().numpy().transpose(1,2,0)\n # ns = np.random.randint(3, 15)\n # noise = np.random.normal(loc=0, scale=1, size=(512, 512, 1)) * ns\n # noise = noise.astype(int)\n # image = np.array(image) + noise\n # image, mask = F.to_tensor(image), F.to_tensor(mask)\n\n # else:\n # pass\n\n # transforming to tensor\n image, mask = F.resize(image, (self.img_size, self.img_size), InterpolationMode.BILINEAR), F.resize(mask, (self.ori_size, self.ori_size), InterpolationMode.NEAREST)\n low_mask = F.resize(mask, (self.low_img_size, self.low_img_size), InterpolationMode.NEAREST)\n image = (image - image.min()) / (image.max() - image.min())\n\n return image, mask, low_mask"
},
{
"identifier": "get_criterion",
"path": "utils/loss_functions/sam_loss.py",
"snippet": "def get_criterion(modelname='SAMIHS', opt=None):\n device = torch.device(opt.device)\n pos_weight = torch.ones([1]).cuda(device=device)*2\n if modelname == \"SAM\":\n criterion = Mask_BCE_loss(pos_weight=pos_weight)\n elif modelname == \"MedSAM\":\n criterion = Mask_BCE_loss(pos_weight=pos_weight)\n else:\n criterion = Mask_BD_and_BCE_loss(pos_weight=pos_weight)\n return criterion"
},
{
"identifier": "get_click_prompt",
"path": "utils/generate_prompts.py",
"snippet": "def get_click_prompt(datapack, opt):\n if 'pt' not in datapack:\n imgs, pt, masks = generate_click_prompt(imgs, masks)\n else:\n pt = datapack['pt']\n point_labels = datapack['p_label']\n\n point_coords = pt\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float32, device=opt.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=opt.device)\n if len(pt.shape) == 2:\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n pt = (coords_torch, labels_torch)\n return pt"
}
] | from ast import arg
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from utils.config import get_config
from utils.evaluation import get_eval
from models.model_dict import get_model
from utils.data_ihs import BCIHM, Transform2D_BCIHM, Instance, Transform2D_Instance
from utils.loss_functions.sam_loss import get_criterion
from utils.generate_prompts import get_click_prompt
from tqdm import tqdm
import os
import argparse
import torch
import torch.optim as optim
import numpy as np
import torch
import time
import random | 7,062 | if args.keep_log:
logtimestr = time.strftime('%m%d%H%M') # initialize the tensorboard for record the training process
boardpath = opt.tensorboard_path + args.modelname + opt.save_path_code + logtimestr
if not os.path.isdir(boardpath):
os.makedirs(boardpath)
TensorWriter = SummaryWriter(boardpath)
# ========== add the seed to make sure the results are reproducible ==========
seed_value = 1234 # the number of seed
np.random.seed(seed_value) # set random seed for numpy
random.seed(seed_value) # set random seed for python
os.environ['PYTHONHASHSEED'] = str(seed_value) # avoid hash random
torch.manual_seed(seed_value) # set random seed for CPU
torch.cuda.manual_seed(seed_value) # set random seed for one GPU
torch.cuda.manual_seed_all(seed_value) # set random seed for all GPU
torch.backends.cudnn.deterministic = True # set random seed for convolution
# ========== model and data preparation ==========
# register the sam model
model = get_model(args.modelname, args=args, opt=opt)
# opt.batch_size = args.batch_size * args.n_gpu
if args.task == 'BCIHM':
tf_train = Transform2D_BCIHM(mode=opt.mode, img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0.0, p_rota=0.5, p_scale=0.5, p_gaussn=0.0,
p_contr=0.5, p_gama=0.5, p_distor=0.0, color_jitter_params=None, long_mask=True)
tf_val = Transform2D_BCIHM(img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
train_dataset = BCIHM(opt.data_path, opt.train_split, tf_train, img_size=args.encoder_input_size)
val_dataset = BCIHM(opt.data_path, opt.val_split, tf_val, img_size=args.encoder_input_size)
elif args.task == 'Instance':
tf_train = Transform2D_Instance(mode=opt.mode, img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0.0, p_rota=0.5, p_scale=0.5, p_gaussn=0.0,
p_contr=0.5, p_gama=0.5, p_distor=0.0, color_jitter_params=None, long_mask=True)
tf_val = Transform2D_Instance(img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
train_dataset = Instance(opt.data_path, opt.train_split, tf_train, img_size=args.encoder_input_size)
val_dataset = Instance(opt.data_path, opt.val_split, tf_val, img_size=args.encoder_input_size)
else:
assert("We do not have the related dataset, please choose another task.")
trainloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=8, pin_memory=True)
valloader = DataLoader(val_dataset, batch_size=2, shuffle=False, num_workers=4, pin_memory=True)
model.to(device)
if opt.pre_trained:
checkpoint = torch.load(opt.load_path)
new_state_dict = {}
for k,v in checkpoint.items():
if k[:7] == 'module.':
new_state_dict[k[7:]] = v
else:
new_state_dict[k] = v
model.load_state_dict(new_state_dict)
if args.n_gpu > 1:
model = nn.DataParallel(model)
if args.warmup:
b_lr = args.base_lr / args.warmup_period
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=b_lr, betas=(0.9, 0.999), weight_decay=0.1)
else:
b_lr = args.base_lr
optimizer = optim.Adam(model.parameters(), lr=args.base_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.5) #learning rate decay
criterion = get_criterion(modelname=args.modelname, opt=opt)
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total_params: {}".format(pytorch_total_params))
# [n for n, value in model.named_parameters() if value.requires_grad == True]
# ========== begin to train the model ==========
iter_num = 0
max_iterations = opt.epochs * len(trainloader)
best_dice, loss_log, dice_log = 0.0, np.zeros(opt.epochs+1), np.zeros(opt.epochs+1)
for epoch in range(opt.epochs):
# ---------- Train ----------
model.train()
optimizer.zero_grad()
train_losses = 0
with tqdm(total=len(trainloader), desc=f'Epoch {epoch}', unit='img') as pbar:
for batch_idx, (datapack) in enumerate(trainloader):
imgs = datapack['image'].to(dtype = torch.float32, device=opt.device)
masks = datapack['low_mask'].to(dtype = torch.float32, device=opt.device)
bbox = torch.as_tensor(datapack['bbox'], dtype=torch.float32, device=opt.device)
pt = get_click_prompt(datapack, opt)
# ---------- forward ----------
pred = model(imgs, pt, bbox)
train_loss = criterion(pred, masks)
# ---------- backward ----------
train_loss.backward()
optimizer.step()
optimizer.zero_grad()
pbar.set_postfix(**{'loss (batch)': train_loss.item()})
train_losses += train_loss.item()
# ---------- Adjust learning rate ----------
if args.warmup and iter_num < args.warmup_period:
lr_ = args.base_lr * ((iter_num + 1) / args.warmup_period)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
else:
if args.warmup:
shift_iter = iter_num - args.warmup_period
assert shift_iter >= 0, f'Shift iter is {shift_iter}, smaller than zero'
lr_ = args.base_lr * (1.0 - shift_iter / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
pbar.update()
scheduler.step()
# ---------- Write log ----------
print('epoch [{}/{}], train loss:{:.4f}'.format(epoch, opt.epochs, train_losses / (batch_idx + 1)))
print('lr: ', optimizer.param_groups[0]['lr'])
if args.keep_log:
TensorWriter.add_scalar('train_loss', train_losses / (batch_idx + 1), epoch)
TensorWriter.add_scalar('learning rate', optimizer.state_dict()['param_groups'][0]['lr'], epoch)
loss_log[epoch] = train_losses / (batch_idx + 1)
# ---------- Validation ----------
if epoch % opt.eval_freq == 0:
model.eval()
|
def main():
# ========== parameters setting ==========
parser = argparse.ArgumentParser(description='Networks')
parser.add_argument('-task', required=True, default='BCIHM', help='task or dataset name')
parser.add_argument('-sam_ckpt', required=True, type=str, default='/data/wyn/Medical-SAM-Adapter/ckpt/sam_vit_b_01ec64.pth', help='Pretrained checkpoint of SAM')
parser.add_argument('-fold', required=True, type=int, default=0, help='task or dataset name')
parser.add_argument('--modelname', default='SAMIHS', type=str, help='type of model, e.g., SAM, SAMFull, MedSAM, MSA, SAMed, SAMUS...')
parser.add_argument('--encoder_input_size', type=int, default=1024, help='the image size of the encoder input, 1024 in SAM, MSA, SAMIHS, 512 in SAMUS')
parser.add_argument('--low_image_size', type=int, default=256, help='the output image embedding size')
parser.add_argument('--vit_name', type=str, default='vit_b', help='select the vit model for the image encoder of sam')
# TODO
parser.add_argument('--n_gpu', type=int, default=1, help='total gpu')
parser.add_argument('--base_lr', type=float, default=0.0005, help='segmentation network learning rate, 0.005 for SAMed, 0.0001 for MSA')
parser.add_argument('--warmup', type=bool, default=False, help='If activated, warm up the learning rate from a lower lr to the base_lr')
parser.add_argument('--warmup_period', type=int, default=250, help='Warm-up iterations, only valid when warmup is activated')
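# note: argparse's type=bool converts any non-empty string (even "False") to True; an action='store_true' flag is the usual way to expose boolean switches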
parser.add_argument('--keep_log', type=bool, default=False, help='keep the loss&lr&dice during training or not')
args = parser.parse_args()
opt = get_config(args.task)
opt.mode = 'train'
device = torch.device(opt.device)
if args.keep_log:
logtimestr = time.strftime('%m%d%H%M') # initialize the tensorboard for record the training process
boardpath = opt.tensorboard_path + args.modelname + opt.save_path_code + logtimestr
if not os.path.isdir(boardpath):
os.makedirs(boardpath)
TensorWriter = SummaryWriter(boardpath)
# ========== add the seed to make sure the results are reproducible ==========
seed_value = 1234 # the number of seed
np.random.seed(seed_value) # set random seed for numpy
random.seed(seed_value) # set random seed for python
os.environ['PYTHONHASHSEED'] = str(seed_value) # avoid hash random
torch.manual_seed(seed_value) # set random seed for CPU
torch.cuda.manual_seed(seed_value) # set random seed for one GPU
torch.cuda.manual_seed_all(seed_value) # set random seed for all GPU
torch.backends.cudnn.deterministic = True # set random seed for convolution
# ========== model and data preparation ==========
# register the sam model
model = get_model(args.modelname, args=args, opt=opt)
# opt.batch_size = args.batch_size * args.n_gpu
if args.task == 'BCIHM':
tf_train = Transform2D_BCIHM(mode=opt.mode, img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0.0, p_rota=0.5, p_scale=0.5, p_gaussn=0.0,
p_contr=0.5, p_gama=0.5, p_distor=0.0, color_jitter_params=None, long_mask=True)
tf_val = Transform2D_BCIHM(img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
train_dataset = BCIHM(opt.data_path, opt.train_split, tf_train, img_size=args.encoder_input_size)
val_dataset = BCIHM(opt.data_path, opt.val_split, tf_val, img_size=args.encoder_input_size)
elif args.task == 'Instance':
tf_train = Transform2D_Instance(mode=opt.mode, img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0.0, p_rota=0.5, p_scale=0.5, p_gaussn=0.0,
p_contr=0.5, p_gama=0.5, p_distor=0.0, color_jitter_params=None, long_mask=True)
tf_val = Transform2D_Instance(img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
train_dataset = Instance(opt.data_path, opt.train_split, tf_train, img_size=args.encoder_input_size)
val_dataset = Instance(opt.data_path, opt.val_split, tf_val, img_size=args.encoder_input_size)
else:
assert("We do not have the related dataset, please choose another task.")
trainloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=8, pin_memory=True)
valloader = DataLoader(val_dataset, batch_size=2, shuffle=False, num_workers=4, pin_memory=True)
model.to(device)
if opt.pre_trained:
checkpoint = torch.load(opt.load_path)
new_state_dict = {}
for k,v in checkpoint.items():
if k[:7] == 'module.':
new_state_dict[k[7:]] = v
else:
new_state_dict[k] = v
model.load_state_dict(new_state_dict)
if args.n_gpu > 1:
model = nn.DataParallel(model)
if args.warmup:
b_lr = args.base_lr / args.warmup_period
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=b_lr, betas=(0.9, 0.999), weight_decay=0.1)
else:
b_lr = args.base_lr
optimizer = optim.Adam(model.parameters(), lr=args.base_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.5) #learning rate decay
criterion = get_criterion(modelname=args.modelname, opt=opt)
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total_params: {}".format(pytorch_total_params))
# [n for n, value in model.named_parameters() if value.requires_grad == True]
# ========== begin to train the model ==========
iter_num = 0
max_iterations = opt.epochs * len(trainloader)
best_dice, loss_log, dice_log = 0.0, np.zeros(opt.epochs+1), np.zeros(opt.epochs+1)
for epoch in range(opt.epochs):
# ---------- Train ----------
model.train()
optimizer.zero_grad()
train_losses = 0
with tqdm(total=len(trainloader), desc=f'Epoch {epoch}', unit='img') as pbar:
for batch_idx, (datapack) in enumerate(trainloader):
imgs = datapack['image'].to(dtype = torch.float32, device=opt.device)
masks = datapack['low_mask'].to(dtype = torch.float32, device=opt.device)
bbox = torch.as_tensor(datapack['bbox'], dtype=torch.float32, device=opt.device)
pt = get_click_prompt(datapack, opt)
# ---------- forward ----------
pred = model(imgs, pt, bbox)
train_loss = criterion(pred, masks)
# ---------- backward ----------
train_loss.backward()
optimizer.step()
optimizer.zero_grad()
pbar.set_postfix(**{'loss (batch)': train_loss.item()})
train_losses += train_loss.item()
# ---------- Adjust learning rate ----------
if args.warmup and iter_num < args.warmup_period:
lr_ = args.base_lr * ((iter_num + 1) / args.warmup_period)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
else:
if args.warmup:
shift_iter = iter_num - args.warmup_period
assert shift_iter >= 0, f'Shift iter is {shift_iter}, smaller than zero'
lr_ = args.base_lr * (1.0 - shift_iter / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
pbar.update()
scheduler.step()
# ---------- Write log ----------
print('epoch [{}/{}], train loss:{:.4f}'.format(epoch, opt.epochs, train_losses / (batch_idx + 1)))
print('lr: ', optimizer.param_groups[0]['lr'])
if args.keep_log:
TensorWriter.add_scalar('train_loss', train_losses / (batch_idx + 1), epoch)
TensorWriter.add_scalar('learning rate', optimizer.state_dict()['param_groups'][0]['lr'], epoch)
loss_log[epoch] = train_losses / (batch_idx + 1)
# ---------- Validation ----------
if epoch % opt.eval_freq == 0:
model.eval() | dices, mean_dice, _, val_losses = get_eval(valloader, model, criterion=criterion, opt=opt, args=args) | 1 | 2023-11-09 07:26:33+00:00 | 8k |
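The learning-rate handling in the training loop above is linear warmup for warmup_period iterations followed by polynomial decay with power 0.9 over max_iterations, when warmup is enabled. A standalone sketch of that schedule is given below; the constants are placeholders, not values from the repository.

base_lr, warmup_period, max_iterations = 5e-4, 250, 10000  # hypothetical settings

def lr_at(iter_num: int) -> float:
    if iter_num < warmup_period:
        return base_lr * (iter_num + 1) / warmup_period          # linear warmup
    shift_iter = iter_num - warmup_period
    return base_lr * (1.0 - shift_iter / max_iterations) ** 0.9  # polynomial decay

print(lr_at(0), lr_at(warmup_period - 1), lr_at(5000))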
silicx/ObjectConceptLearning | models/OCRN_intervention.py | [
{
"identifier": "OcrnBaseModel",
"path": "models/base_models.py",
"snippet": "class OcrnBaseModel(nn.Module):\n\n def __init__(self, dataset, args):\n super(OcrnBaseModel, self).__init__()\n\n self.args = args\n self.num_obj = len(dataset.objs)\n self.num_attr = len(dataset.attrs)\n self.num_aff = dataset.num_aff\n\n # submodules\n if args.data_type == \"feature\":\n self.backbone = None\n self.feat_dim = dataset.feature_dim\n else:\n self.backbone, self.feat_dim = load_backbone(args.backbone_type, args.backbone_weight)\n\n # prior information\n prior_info = torch.load(f\"features/OCL_{args.backbone_type}/obj_prior.t7\")\n self.register_buffer(\"mean_obj_features\",\n prior_info[\"mean_obj_features\"] ) # (n_obj, dim)\n\n \n # preproc P(O)\n if args.obj_prior_type == \"default\":\n pass\n elif args.obj_prior_type == \"step\":\n sep = np.linspace(0, self.num_obj, args.obj_prior_bins, dtype=int).tolist()\n frequency = prior_info[\"freqency\"].numpy()\n order = frequency.argsort()\n for i,j in zip(sep[:-1], sep[1:]):\n ids = order[i:j]\n frequency[ids] = frequency[ids].mean()\n prior_info[\"freqency\"] = torch.from_numpy(frequency)\n else:\n raise NotImplementedError(args.obj_prior_type)\n\n self.register_buffer(\"obj_frequence\", \n prior_info[\"freqency\"] ) # (n_obj,)\n assert len(prior_info[\"freqency\"].size())==1\n \n\n CA = json.load(open('data/resources/OCL_category_annot.json'))\n self.register_buffer(\"category_attr\",\n torch.Tensor([ CA[o]['attr'] for o in dataset.objs ]).float() )\n self.register_buffer(\"category_aff\",\n torch.Tensor([ CA[o]['aff'] for o in dataset.objs ]).float() )\n\n print(f\"CA: attr={self.category_attr.shape}, aff={self.category_aff.shape}\")\n\n # loss weight\n if args.loss_class_weight:\n class_weight = json.load(open(\"data/resources/OCL_weight.json\"))\n self.register_buffer(\"obj_loss_wgt\", torch.tensor(class_weight[\"obj_weight\"]))\n self.register_buffer(\"attr_loss_wgt\", torch.tensor(class_weight[\"attr_weight\"]))\n self.register_buffer(\"aff_loss_wgt\", torch.tensor(class_weight[\"aff_weight\"]))\n else:\n self.obj_loss_wgt, self.attr_loss_wgt, self.aff_loss_wgt = None, None, None\n\n self.pos_weight_attr = None\n self.pos_weight_aff = None\n \n \n\n # losses\n if args.positive_bce:\n self.attr_bce = PositiveBCELoss(class_weight=self.attr_loss_wgt)\n self.aff_bce = PositiveBCELoss(class_weight=self.aff_loss_wgt)\n else:\n self.attr_bce = nn.BCEWithLogitsLoss(weight=self.attr_loss_wgt, pos_weight=self.pos_weight_attr)\n self.aff_bce = nn.BCEWithLogitsLoss(weight=self.aff_loss_wgt, pos_weight=self.pos_weight_aff)\n \n self.pair_prob_bce = nn.BCELoss()"
},
{
"identifier": "MLP",
"path": "models/base_models.py",
"snippet": "class MLP(nn.Module):\n \"\"\"Multi-layer perceptron, 1 layers as default. No activation after last fc\"\"\"\n\n def __init__(self, inp_dim, out_dim, hidden_layers=[], batchnorm=True, bias=True, out_relu=False, out_bn=False):\n super(MLP, self).__init__()\n\n inner_bias = bias and (not batchnorm)\n\n mod = []\n if hidden_layers is not None:\n last_dim = inp_dim\n for hid_dim in hidden_layers:\n mod.append(nn.Linear(last_dim, hid_dim, bias=inner_bias))\n if batchnorm:\n mod.append(nn.BatchNorm1d(hid_dim))\n mod.append(nn.ReLU(inplace=True))\n last_dim = hid_dim\n\n mod.append(nn.Linear(last_dim, out_dim, bias=bias))\n if out_bn:\n mod.append(nn.BatchNorm1d(out_dim))\n if out_relu:\n mod.append(nn.ReLU(inplace=True))\n\n self.mod = nn.Sequential(*mod)\n\n def forward(self, x):\n output = self.mod(x)\n return output"
},
{
"identifier": "ParallelMLP",
"path": "models/base_models.py",
"snippet": "class ParallelMLP(nn.Module):\n def __init__(self, inp_dim, out_dim, num_para, hidden_layers=[], layernorm=True, bias=True, share_last_fc=False, out_relu=False):\n super().__init__()\n inner_bias = bias\n\n mod = []\n if hidden_layers is not None:\n last_dim = inp_dim\n for hid_dim in hidden_layers:\n mod.append(ParallelLinear(last_dim, hid_dim, num_para, bias=inner_bias))\n\n if layernorm:\n mod.append(nn.LayerNorm(hid_dim))\n mod.append(nn.ReLU(inplace=True))\n last_dim = hid_dim\n\n if share_last_fc:\n mod.append(nn.Linear(last_dim, out_dim, bias=inner_bias))\n else:\n mod.append(ParallelLinear(last_dim, out_dim, num_para, bias=inner_bias))\n \n if out_relu:\n mod.append(nn.ReLU(inplace=True))\n\n self.mod = nn.Sequential(*mod)\n\n def forward(self, x):\n output = self.mod(x)\n return output"
},
{
"identifier": "Aggregator",
"path": "models/base_models.py",
"snippet": "class Aggregator(nn.Module):\n def __init__(self, method, args=None, num_para=None):\n super().__init__()\n self.support = ['sum', 'mean', 'max', 'concat']\n self.method = method\n\n if method not in self.support:\n raise NotImplementedError(\n 'Not supported aggregation method [%s].\\nWe only support: %s' % (method, self.support))\n\n if method == \"concat\":\n self.compression = nn.Linear(args.parallel_attr_rep_dim*num_para, args.aggr_rep_dim, bias=False)\n self.relu = nn.ReLU(inplace=True)\n\n if method == \"qkv\":\n raise NotImplementedError()\n\n def forward(self, tensor, mask=None, mask_method=\"zero\"):\n \"\"\"\n :param tensor: bz * n * dim\n :param mask: bz * n\n :return: bz * dim\n \"\"\"\n \n\n if mask is not None:\n if len(mask.size())==2:\n mask = mask.unsqueeze(-1)\n else:\n mask = mask.unsqueeze(-1).unsqueeze(0)\n\n if mask_method == \"zero\":\n tensor = tensor * mask\n elif mask_method == \"random\":\n rdm = torch.randn_like(tensor).to(tensor.device)\n tensor = torch.where(mask.expand_as(tensor), tensor, rdm)\n else:\n raise NotImplementedError(mask_method)\n\n if self.method == 'sum':\n return tensor.sum(1)\n elif self.method == 'mean':\n return tensor.mean(1)\n elif self.method == 'max':\n return tensor.max(1).values\n elif self.method == 'concat':\n out = tensor.reshape(tensor.shape[0], -1)\n out = self.compression(out)\n out = self.relu(out)\n return out"
},
{
"identifier": "build_counterfactual",
"path": "models/base_models.py",
"snippet": "def build_counterfactual(causal, num_attr, num_aff):\n '''\n :param causal: [ N, 3 ] (inst_id, attr_id, aff_id)\n :param num_attr:\n :param num_aff:\n :return:\n counterfactual_inst_id : tensor [ M ] index of instance in batch\n counterfactual_attr_mask: tensor [ M, num_attr ] which attr to be skipped\n counterfactual_aff_mask: tensor [ M, num_aff ] which aff will be affected after counterfactual\n '''\n orig_size = causal.shape[0]\n unique_inst_att_pair = torch.unique(causal[:, :2], dim=0)\n reduce_size = unique_inst_att_pair.shape[0]\n counterfactual_inst_id = unique_inst_att_pair[:, 0]\n counterfactual_attr_mask = onehot(unique_inst_att_pair[:, 1], num_attr, causal.device)\n space_mapping = torch.all(\n causal[:, :2].unsqueeze(0).expand(reduce_size, orig_size, 2) == \\\n unique_inst_att_pair[:, :2].unsqueeze(1).expand(reduce_size, orig_size, 2),\n dim=2\n ).float()\n counterfactual_aff_mask = torch.matmul(space_mapping, onehot(causal[:, 2], num_aff, causal.device))\n\n return counterfactual_inst_id, counterfactual_attr_mask, counterfactual_aff_mask"
},
{
"identifier": "CounterfactualHingeLoss",
"path": "models/base_models.py",
"snippet": "class CounterfactualHingeLoss(nn.Module):\n def __init__(self, margin=0.1):\n super().__init__()\n self.margin = margin\n\n def forward(self, cf_prob, orig_prob, gt_label, cf_label_mask):\n loss = torch.where(\n gt_label == 1,\n cf_prob - (orig_prob - self.margin),\n (orig_prob + self.margin) - cf_prob\n )\n # loss[loss < 0] = 0\n loss = nn.functional.relu(loss, inplace=True)\n\n loss = loss * cf_label_mask\n loss = loss.mean(0).sum()\n return loss"
}
] | from typing import final
from models.base_models import OcrnBaseModel, MLP, ParallelMLP, Aggregator, build_counterfactual, CounterfactualHingeLoss
import torch
import torch.nn as nn
import math | 4,500 |
self.parallel_attr_feat = ParallelMLP(
args.attr_out_rep_dim, args.parallel_attr_rep_dim, num_para=self.num_attr,
hidden_layers=args.fc_para_feat, layernorm=args.layernorm, out_relu=args.out_relu)
self.attr_auxIA_classifier = ParallelMLP(args.parallel_attr_rep_dim, 1, num_para=self.num_attr, hidden_layers=args.fc_cls,
layernorm=args.layernorm, share_last_fc=True)
self.attr_IA_classifier = MLP(args.attr_rep_dim, self.num_attr, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.aff_IA_classifier = MLP(args.aff_rep_dim, self.num_aff, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
assert args.sep_CA_cls
self.attr_CA_classifier = MLP(args.attr_rep_dim, self.num_attr, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.aff_CA_classifier = MLP(args.aff_rep_dim, self.num_aff, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.mseloss = torch.nn.MSELoss()
self.hinge = CounterfactualHingeLoss(args.counterfactual_margin)
def forward(self, batch, require_loss=True):
if self.backbone:
feature = self.backbone(batch["image"], batch["main_bbox"])
batch["gt_attr"] = torch.cat(batch["gt_attr"], 0)
batch["gt_aff"] = torch.cat(batch["gt_aff"], 0)
else:
feature = batch["image"]
batchsize = feature.size(0)
gt_all_CAttr_vec = self.category_attr
gt_all_CAff_vec = self.category_aff
# Attribute module
feat_CAttr = self.fc_feat2attr(self.mean_obj_features) # (n_obj, dim_attr)
feat_IAttr = self.attr_instantialize(
feat_CAttr, feature,
in_postproc = lambda x:x.unsqueeze(0).expand(batchsize, -1, -1),
cond_postproc = lambda x:x.unsqueeze(1).expand(-1, self.num_obj, -1)
) # (n_obj, dim), (bz, dim) -> (bz, n_obj, dim)
# feat_IAttr = self.attr_inst_bn(feat_IAttr)
feat_mean_IAttr = torch.einsum("ijk,j->ik", feat_IAttr, self.obj_frequence)
# (bz, dim_attr)
logit_CAttr = self.attr_CA_classifier(feat_CAttr)
logit_IAttr = self.attr_IA_classifier(feat_mean_IAttr)
feat_parallel_IAttr = self.parallel_attr_feat(feat_mean_IAttr.unsqueeze(1).expand(-1,self.num_attr, -1))
logit_aux_IAttr = self.attr_auxIA_classifier(feat_parallel_IAttr).squeeze(-1)
# Affordance module
feat_aggr_IAttr = self.aggregator(feat_parallel_IAttr)
feat_CAff = self.fc_feat2aff(
torch.cat([self.mean_obj_features, feat_CAttr], 1)
) # (n_obj, dim_aff)
feat_IAff = self.aff_instantialize(
feat_CAff, torch.cat([feature, feat_aggr_IAttr], 1),
in_postproc = lambda x:x.unsqueeze(0).expand(batchsize, -1, -1),
cond_postproc = lambda x:x.unsqueeze(1).expand(-1, self.num_obj, -1)
)
# (n_obj, dim), (bz, dim) -> (bz, n_obj, dim)
# feat_IAff = self.aff_inst_bn(feat_IAff)
feat_mean_IAff = torch.einsum("ijk,j->ik", feat_IAff, self.obj_frequence)
# (bz, dim_aff)
logit_CAff = self.aff_CA_classifier(feat_CAff)
logit_IAff = self.aff_IA_classifier(feat_mean_IAff)
prob_IAttr = torch.sigmoid(logit_IAttr)
prob_IAff = torch.sigmoid(logit_IAff)
if require_loss:
losses = {}
if self.args.lambda_attr > 0:
if self.args.lambda_cls_CA>0:
losses["loss_attr/CA_cls"] = self.attr_bce(logit_CAttr, gt_all_CAttr_vec)
if self.args.lambda_cls_IA>0:
losses["loss_attr/IA_cls"] = self.attr_bce(logit_IAttr, batch["gt_attr"])
if self.args.lambda_cls_inst_IA>0:
logit_inst_IAttr = self.attr_IA_classifier(feat_IAttr)
losses["loss_attr/inst_IA_cls"] = self.attr_bce(
logit_inst_IAttr, batch["gt_attr"].unsqueeze(1).expand(-1, self.num_obj, -1))
if any([x.startswith("loss_attr") for x in losses]):
losses["loss_attr/total"] = (
self.args.lambda_cls_CA * losses.get("loss_attr/CA_cls", 0.) +
self.args.lambda_cls_IA * losses.get("loss_attr/IA_cls", 0.) +
self.args.lambda_cls_inst_IA * losses.get("loss_attr/inst_IA_cls", 0.) )
if self.args.lambda_aff > 0:
if self.args.lambda_cls_CA>0:
losses["loss_aff/CA_cls"] = self.aff_bce(logit_CAff, gt_all_CAff_vec)
if self.args.lambda_cls_IA>0:
losses["loss_aff/IA_cls"] = self.aff_bce(logit_IAff, batch["gt_aff"])
if self.args.lambda_cls_inst_IA>0:
logit_inst_IAff = self.aff_IA_classifier(feat_IAff)
losses["loss_aff/inst_IA_cls"] = self.aff_bce(
logit_inst_IAff, batch["gt_aff"].unsqueeze(1).expand(-1, self.num_obj, -1))
if any([x.startswith("loss_aff") for x in losses]):
losses["loss_aff/total"] = (
self.args.lambda_cls_CA * losses.get("loss_aff/CA_cls", 0.) +
self.args.lambda_cls_IA * losses.get("loss_aff/IA_cls", 0.) +
self.args.lambda_cls_inst_IA * losses.get("loss_aff/inst_IA_cls", 0.) )
if self.args.lambda_cf > 0 and batch['gt_causal'].shape[0] > 0:
|
@final
class FullSelfAttention(nn.Module):
def __init__(self, feat_dim, cond_dim, hidden_dim, args):
""" output = f(input, condition)
in_dim/cond_dim/out_dim = dimension of input/condition/output
fc_in_hid/fc_cond_hid = hidden layers of fc after input/condition
fc_out_hid = hidden layers of fc before output
"""
super(FullSelfAttention, self).__init__()
fc_in_hid = args.fc_pre
fc_cond_hid = args.fc_att
fc_out_hid = args.fc_compress
self.fc_feat_Q = MLP(feat_dim, hidden_dim, fc_in_hid, args.batchnorm, bias=False)
self.fc_feat_V = MLP(feat_dim, hidden_dim, fc_in_hid, args.batchnorm, bias=False)
self.fc_feat_K = MLP(feat_dim, hidden_dim, fc_in_hid, args.batchnorm, bias=False)
self.fc_cond_Q = MLP(cond_dim, hidden_dim, fc_cond_hid, args.batchnorm, bias=False)
self.fc_cond_V = MLP(cond_dim, hidden_dim, fc_cond_hid, args.batchnorm, bias=False)
self.fc_cond_K = MLP(cond_dim, hidden_dim, fc_cond_hid, args.batchnorm, bias=False)
self.rtemp = 1.0/math.sqrt(hidden_dim)
self.fc_out = MLP(2*hidden_dim, feat_dim, fc_out_hid, args.batchnorm, out_relu=args.out_relu)
def forward(self, feat, cond, in_postproc=lambda x:x, cond_postproc=lambda x:x):
feat_Q = in_postproc( self.fc_feat_Q(feat) ) # (bz*obj, hid_dim)
feat_V = in_postproc( self.fc_feat_V(feat) )
feat_K = in_postproc( self.fc_feat_K(feat) )
cond_Q = cond_postproc( self.fc_cond_Q(cond) )
cond_V = cond_postproc( self.fc_cond_V(cond) )
cond_K = cond_postproc( self.fc_cond_K(cond) )
K_diff = (feat_K - cond_K) * self.rtemp
KQ_ff_fc = (feat_Q * K_diff).sum(-1) # (bz*obj, )
KQ_cf_cc = (cond_Q * K_diff).sum(-1)
feat_att_f = torch.sigmoid(KQ_ff_fc).unsqueeze(-1)
cond_att_f = torch.sigmoid(KQ_cf_cc).unsqueeze(-1)
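# the sigmoid gates above, computed from Q * (K_feat - K_cond) summed over channels, control how far each value vector is pushed from the condition stream toward the feature stream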
V_diff = (feat_V - cond_V)
hid_feat = V_diff*feat_att_f + cond_V
hid_cond = V_diff*cond_att_f + cond_V
hidden = torch.cat([hid_feat, hid_cond], -1)
out = self.fc_out(hidden)
return out
# @final
class Model(OcrnBaseModel):
def __init__(self, dataset, args):
super(Model, self).__init__(dataset, args)
# model param
self.fc_feat2attr = MLP(self.feat_dim, args.attr_rep_dim, args.fc_feat2attr, args.batchnorm, out_relu=args.out_relu, out_bn=args.batchnorm)
self.fc_feat2aff = MLP(self.feat_dim + args.attr_rep_dim, args.aff_rep_dim, args.fc_feat2aff, args.batchnorm, out_relu=args.out_relu, out_bn=args.batchnorm)
self.attr_instantialize = FullSelfAttention(args.attr_rep_dim, self.feat_dim, args.attr_hidden_rep_dim, args=args)
self.aff_instantialize = FullSelfAttention(args.aff_rep_dim, self.feat_dim + args.aggr_rep_dim, args.aff_hidden_rep_dim, args=args)
self.aggregator = Aggregator(self.args.aggregation, args, self.num_attr)
self.parallel_attr_feat = ParallelMLP(
args.attr_out_rep_dim, args.parallel_attr_rep_dim, num_para=self.num_attr,
hidden_layers=args.fc_para_feat, layernorm=args.layernorm, out_relu=args.out_relu)
self.attr_auxIA_classifier = ParallelMLP(args.parallel_attr_rep_dim, 1, num_para=self.num_attr, hidden_layers=args.fc_cls,
layernorm=args.layernorm, share_last_fc=True)
self.attr_IA_classifier = MLP(args.attr_rep_dim, self.num_attr, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.aff_IA_classifier = MLP(args.aff_rep_dim, self.num_aff, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
assert args.sep_CA_cls
self.attr_CA_classifier = MLP(args.attr_rep_dim, self.num_attr, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.aff_CA_classifier = MLP(args.aff_rep_dim, self.num_aff, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.mseloss = torch.nn.MSELoss()
self.hinge = CounterfactualHingeLoss(args.counterfactual_margin)
def forward(self, batch, require_loss=True):
if self.backbone:
feature = self.backbone(batch["image"], batch["main_bbox"])
batch["gt_attr"] = torch.cat(batch["gt_attr"], 0)
batch["gt_aff"] = torch.cat(batch["gt_aff"], 0)
else:
feature = batch["image"]
batchsize = feature.size(0)
gt_all_CAttr_vec = self.category_attr
gt_all_CAff_vec = self.category_aff
# Attribute module
feat_CAttr = self.fc_feat2attr(self.mean_obj_features) # (n_obj, dim_attr)
feat_IAttr = self.attr_instantialize(
feat_CAttr, feature,
in_postproc = lambda x:x.unsqueeze(0).expand(batchsize, -1, -1),
cond_postproc = lambda x:x.unsqueeze(1).expand(-1, self.num_obj, -1)
) # (n_obj, dim), (bz, dim) -> (bz, n_obj, dim)
# feat_IAttr = self.attr_inst_bn(feat_IAttr)
feat_mean_IAttr = torch.einsum("ijk,j->ik", feat_IAttr, self.obj_frequence)
# (bz, dim_attr)
logit_CAttr = self.attr_CA_classifier(feat_CAttr)
logit_IAttr = self.attr_IA_classifier(feat_mean_IAttr)
feat_parallel_IAttr = self.parallel_attr_feat(feat_mean_IAttr.unsqueeze(1).expand(-1,self.num_attr, -1))
logit_aux_IAttr = self.attr_auxIA_classifier(feat_parallel_IAttr).squeeze(-1)
# Affordance module
feat_aggr_IAttr = self.aggregator(feat_parallel_IAttr)
feat_CAff = self.fc_feat2aff(
torch.cat([self.mean_obj_features, feat_CAttr], 1)
) # (n_obj, dim_aff)
feat_IAff = self.aff_instantialize(
feat_CAff, torch.cat([feature, feat_aggr_IAttr], 1),
in_postproc = lambda x:x.unsqueeze(0).expand(batchsize, -1, -1),
cond_postproc = lambda x:x.unsqueeze(1).expand(-1, self.num_obj, -1)
)
# (n_obj, dim), (bz, dim) -> (bz, n_obj, dim)
# feat_IAff = self.aff_inst_bn(feat_IAff)
feat_mean_IAff = torch.einsum("ijk,j->ik", feat_IAff, self.obj_frequence)
# (bz, dim_aff)
logit_CAff = self.aff_CA_classifier(feat_CAff)
logit_IAff = self.aff_IA_classifier(feat_mean_IAff)
prob_IAttr = torch.sigmoid(logit_IAttr)
prob_IAff = torch.sigmoid(logit_IAff)
if require_loss:
losses = {}
if self.args.lambda_attr > 0:
if self.args.lambda_cls_CA>0:
losses["loss_attr/CA_cls"] = self.attr_bce(logit_CAttr, gt_all_CAttr_vec)
if self.args.lambda_cls_IA>0:
losses["loss_attr/IA_cls"] = self.attr_bce(logit_IAttr, batch["gt_attr"])
if self.args.lambda_cls_inst_IA>0:
logit_inst_IAttr = self.attr_IA_classifier(feat_IAttr)
losses["loss_attr/inst_IA_cls"] = self.attr_bce(
logit_inst_IAttr, batch["gt_attr"].unsqueeze(1).expand(-1, self.num_obj, -1))
if any([x.startswith("loss_attr") for x in losses]):
losses["loss_attr/total"] = (
self.args.lambda_cls_CA * losses.get("loss_attr/CA_cls", 0.) +
self.args.lambda_cls_IA * losses.get("loss_attr/IA_cls", 0.) +
self.args.lambda_cls_inst_IA * losses.get("loss_attr/inst_IA_cls", 0.) )
if self.args.lambda_aff > 0:
if self.args.lambda_cls_CA>0:
losses["loss_aff/CA_cls"] = self.aff_bce(logit_CAff, gt_all_CAff_vec)
if self.args.lambda_cls_IA>0:
losses["loss_aff/IA_cls"] = self.aff_bce(logit_IAff, batch["gt_aff"])
if self.args.lambda_cls_inst_IA>0:
logit_inst_IAff = self.aff_IA_classifier(feat_IAff)
losses["loss_aff/inst_IA_cls"] = self.aff_bce(
logit_inst_IAff, batch["gt_aff"].unsqueeze(1).expand(-1, self.num_obj, -1))
if any([x.startswith("loss_aff") for x in losses]):
losses["loss_aff/total"] = (
self.args.lambda_cls_CA * losses.get("loss_aff/CA_cls", 0.) +
self.args.lambda_cls_IA * losses.get("loss_aff/IA_cls", 0.) +
self.args.lambda_cls_inst_IA * losses.get("loss_aff/inst_IA_cls", 0.) )
if self.args.lambda_cf > 0 and batch['gt_causal'].shape[0] > 0: | cf_inst_id, cf_attr_mask, cf_aff_mask = build_counterfactual( | 4 | 2023-11-07 13:03:27+00:00 | 8k |
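For reference, a toy call to the build_counterfactual helper quoted in the context above, assuming it is importable from models.base_models; the tensor values are invented purely to show the returned shapes.

import torch
from models.base_models import build_counterfactual

# hypothetical (instance_id, attr_id, aff_id) triples
causal = torch.tensor([[0, 2, 1],
                       [0, 2, 3],
                       [1, 0, 4]])
inst_id, attr_mask, aff_mask = build_counterfactual(causal, num_attr=5, num_aff=6)
# inst_id:   (M,)   instance index of each unique (instance, attribute) pair
# attr_mask: (M, 5) one-hot mask of the attribute being intervened on
# aff_mask:  (M, 6) affordances affected by that intervention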
ApolloAuto/apollo-model-centerpoint | paddle3d/models/detection/centerpoint/centerpoint.py | [
{
"identifier": "manager",
"path": "paddle3d/apis/manager.py",
"snippet": "class ComponentManager:\n def __init__(self, *, name: str, description: str = ''):\n def __len__(self):\n def __repr__(self):\n def __getitem__(self, item: str):\n def components_dict(self) -> dict:\n def name(self) -> str:\n def description(self) -> str:\n def _add_single_component(self, component: Callable):\n def add_component(self, components: Union[Callable, Iterable[Callable]]\n ) -> Union[Callable, Iterable[Callable]]:\nVOXEL_ENCODERS = ComponentManager(name=\"voxel_encoders\")\nMIDDLE_ENCODERS = ComponentManager(name=\"middle_encoders\")\nBACKBONES = ComponentManager(name=\"backbones\")\nMODELS = ComponentManager(name=\"models\")\nNECKS = ComponentManager(name=\"necks\")\nHEADS = ComponentManager(name=\"heads\")\nLOSSES = ComponentManager(name=\"losses\")\nDATASETS = ComponentManager(name=\"datasets\")\nTRANSFORMS = ComponentManager(name=\"transforms\")\nLR_SCHEDULERS = ComponentManager(name=\"lr_schedulers\")\nOPTIMIZERS = ComponentManager(name=\"optimizers\")\nVOXELIZERS = ComponentManager(name=\"voxelizers\")\nPOINT_ENCODERS = ComponentManager(name=\"point_encoders\")\nPOSITIONAL_ENCODING = ComponentManager(name=\"POSITIONAL_ENCODING\")\nTRANSFORMERS = ComponentManager(name=\"TRANSFORMERS\")\nTRANSFORMER_ENCODERS = ComponentManager(name=\"TRANSFORMER_ENCODERS\")\nTRANSFORMER_ENCODER_LAYERS = ComponentManager(name=\"TRANSFORMER_ENCODER_LAYERS\")\nATTENTIONS = ComponentManager(name=\"ATTENTIONS\")\nBBOX_CODERS = ComponentManager(name=\"BBOX_CODERS\")\nBBOX_ASSIGNERS = ComponentManager(name=\"BBOX_ASSIGNERS\")\nMATCH_COSTS = ComponentManager(name=\"MATCH_COSTS\")\nBBOX_SAMPLERS = ComponentManager(name=\"BBOX_SAMPLERS\")\nTRANSFORMER_DECODER_LAYERS = ComponentManager(name=\"TRANSFORMER_DECODER_LAYERS\")\nTRANSFORMER_DECODERS = ComponentManager(name=\"TRANSFORMER_DECODERS\")"
},
{
"identifier": "BBoxes3D",
"path": "paddle3d/geometries/bbox.py",
"snippet": "class BBoxes3D(_Structure):\n \"\"\"\n \"\"\"\n\n def __init__(self,\n data: np.ndarray,\n coordmode: CoordMode = 0,\n velocities: List[float] = None,\n origin: List[float] = [0.5, 0.5, 0.5],\n rot_axis: int = 2):\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n\n self.coordmode = coordmode\n self.velocities = velocities\n self.origin = origin\n self.rot_axis = rot_axis\n\n @property\n def corners_3d(self):\n # corners_3d format: x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0\n dx, dy, dz = self[:, 3:6].T\n b = dz.shape[0]\n\n x_corners = np.array([[0., 0., 0., 0., 1., 1., 1., 1.]],\n self.dtype).repeat(\n b, axis=0)\n y_corners = np.array([[0., 0., 1., 1., 0., 0., 1., 1.]],\n self.dtype).repeat(\n b, axis=0)\n z_corners = np.array([[0., 1., 1., 0., 0., 1., 1., 0.]],\n self.dtype).repeat(\n b, axis=0)\n\n x_corners = (\n dx[:, np.newaxis] * (x_corners - self.origin[0]))[:, :, np.newaxis]\n y_corners = (\n dy[:, np.newaxis] * (y_corners - self.origin[1]))[:, :, np.newaxis]\n z_corners = (\n dz[:, np.newaxis] * (z_corners - self.origin[2]))[:, :, np.newaxis]\n corners = np.concatenate([x_corners, y_corners, z_corners], axis=-1)\n\n angle = self[:, -1]\n corners = rotation_3d_in_axis(corners, angle, axis=self.rot_axis)\n centers = self[:, 0:3][:, np.newaxis, :]\n corners += centers\n\n return corners\n\n @property\n def corners_2d(self):\n # corners_2d format: x0y0, x0y1, x1y1, x1y0\n dx, dy = self[:, 3:5].T\n b = dy.shape[0]\n\n x_corners = np.array([[0., 0., 1., 1.]], self.dtype).repeat(b, axis=0)\n y_corners = np.array([[0., 1., 1., 0.]], self.dtype).repeat(b, axis=0)\n\n x_corners = (\n dx[:, np.newaxis] * (x_corners - self.origin[0]))[:, :, np.newaxis]\n y_corners = (\n dy[:, np.newaxis] * (y_corners - self.origin[1]))[:, :, np.newaxis]\n corners = np.concatenate([x_corners, y_corners], axis=-1)\n\n angle = self[:, -1]\n rot_sin = np.sin(angle)\n rot_cos = np.cos(angle)\n rotation_matrix = np.array([[rot_cos, -rot_sin], [rot_sin, rot_cos]],\n dtype=self.dtype)\n #rotation_matrix = rotation_matrix.transpose([2, 0, 1])\n #corners = corners @ rotation_matrix #TODO(luoqianhui)\n corners = np.einsum(\"aij,jka->aik\", corners, rotation_matrix)\n\n centers = self[:, 0:2][:, np.newaxis, :]\n corners += centers\n\n return corners\n\n def scale(self, factor: float):\n \"\"\"\n \"\"\"\n # Scale x, y, z, w, l, h, except the orientation\n self[..., :-1] = self[..., :-1] * factor\n\n # Scale velocities\n if self.velocities is not None:\n self.velocities[..., :] = self.velocities[..., :] * factor\n\n def translate(self, translation: np.ndarray):\n self[..., :3] = self[..., :3] + translation\n\n def rotate_around_z(self, angle: np.ndarray):\n # Rotation matrix around the z-axis\n rot_sin = np.sin(angle)\n rot_cos = np.cos(angle)\n rotation_matrix = np.array(\n [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],\n dtype=self.dtype)\n\n # Rotate x,y,z\n self[..., :3] = self[..., :3] @ rotation_matrix\n\n # Rotate velocities\n if self.velocities is not None:\n self.velocities[..., :2] = (np.hstack([\n self.velocities[..., :2],\n np.zeros(\n (self.velocities.shape[0], 1), dtype=self.velocities.dtype)\n ]) @ rotation_matrix)[..., :2]\n\n # Update orientation\n self[..., -1] += angle\n\n def horizontal_flip(self):\n \"\"\"\n The inputs are pixel indices\n \"\"\"\n self[:, 0] = -self[:, 0]\n if self.velocities is not None:\n self.velocities[:, 0] = -self.velocities[:, 0]\n self[:,\n -1] = -self[:,\n -1] + 2 * np.pi # TODO(luoqianhui): CHECK THIS 2 * np.pi is 
needed\n\n def horizontal_flip_coords(self):\n \"\"\"\n The inputs are floating point coordinates\n \"\"\"\n new_box3d_quat = np.stack(\n [self[:, 3], -self[:, 2], -self[:, 1], self[:, 0]], 1)\n self[:, :4] = new_box3d_quat\n self[:, 4] = -self[:, 4]\n\n def to_vision_based_3d_box(self):\n height, width, length = self[:, 3:4], self[:, 4:5], self[:, 5:6]\n x, y, z = self[:, 0:1], self[:, 1:2], self[:, 2:3]\n rotation = self[:, 6]\n tvec = np.concatenate([x, y - height / 2, z], axis=1)\n box_pose = []\n for i in range(rotation.shape[0]):\n wxyz = Quaternion(\n Quaternion(axis=[1, 0, 0], radians=np.pi / 2) * Quaternion(\n axis=[0, 0, 1], radians=-rotation[i]))\n box_pose.append(wxyz.elements.astype(np.float32))\n box_pose = np.stack(box_pose, axis=0)\n box3d_new = np.concatenate([box_pose, tvec, width, length, height],\n axis=1)\n return box3d_new\n\n def vertical_flip(self):\n self[:, 1] = -self[:, 1]\n if self.velocities is not None:\n self.velocities[:, 1] = -self.velocities[:, 1]\n self[:, -1] = -self[:, -1] + np.pi\n\n @staticmethod\n def limit_period(val, offset: float = 0.5, period: float = np.pi):\n return val - np.floor(val / period + offset) * period\n\n def get_mask_of_bboxes_outside_range(self, point_cloud_range: np.ndarray):\n bboxes_bev = self.corners_2d\n # Represent the bev range as a bounding box\n limit_polygons = minmax_range_3d_to_corner_2d(point_cloud_range)\n mask = points_in_convex_polygon_2d(\n bboxes_bev.reshape(-1, 2), limit_polygons)\n return np.any(mask.reshape(-1, 4), axis=1)\n\n def get_mask_of_small_bboxes(self, size_thr: np.ndarray):\n dim = self[:, 3:6]\n thr = size_thr.reshape(1, 3).repeat(self.shape[0], axis=0)\n mask = np.array((dim > thr))\n mask = np.all(mask, axis=1)\n return mask.nonzero()\n\n def masked_select(self, mask):\n selected_data = self[mask]\n selected_velocities = self.velocities\n if self.velocities is not None:\n selected_velocities = self.velocities[mask]\n selected_bbox = BBoxes3D(selected_data, self.coordmode,\n selected_velocities, self.origin,\n self.rot_axis)\n return selected_bbox"
},
{
"identifier": "BaseLidarModel",
"path": "paddle3d/models/base/base_lidar_detection.py",
"snippet": "class BaseLidarModel(BaseDetectionModel):\n def __init__(self,\n box_with_velocity: bool = False,\n with_voxelizer: bool = False,\n max_num_points_in_voxel: int = -1,\n in_channels: int = None):\n super().__init__(box_with_velocity=box_with_velocity)\n self.with_voxelizer = with_voxelizer\n self.max_num_points_in_voxel = max_num_points_in_voxel\n self.in_channels = in_channels\n self.point_dim = -1\n\n @property\n def inputs(self) -> List[dict]:\n if self.with_voxelizer:\n points = {\n 'name': 'data',\n 'dtype': 'float32',\n 'shape': [-1, self.point_dim]\n }\n res = [points]\n else:\n voxels = {\n 'name': 'voxels',\n 'dtype': 'float32',\n 'shape': [-1, self.max_num_points_in_voxel, self.in_channels]\n }\n coords = {'name': 'coords', 'dtype': 'int32', 'shape': [-1, 3]}\n num_points_per_voxel = {\n 'name': 'num_points_per_voxel',\n 'dtype': 'int32',\n 'shape': [-1]\n }\n res = [voxels, coords, num_points_per_voxel]\n return res\n\n @property\n def sensor(self) -> str:\n return \"lidar\""
},
{
"identifier": "Sample",
"path": "paddle3d/sample.py",
"snippet": "class Sample(_EasyDict):\n \"\"\"\n \"\"\"\n _VALID_MODALITIES = [\"image\", \"lidar\", \"radar\", \"multimodal\", \"multiview\"]\n\n def __init__(self, path: str, modality: str):\n if modality not in self._VALID_MODALITIES:\n raise ValueError('Only modality {} is supported, but got {}'.format(\n self._VALID_MODALITIES, modality))\n\n self.meta = SampleMeta()\n\n self.path = path\n self.data = None\n self.modality = modality.lower()\n\n self.bboxes_2d = None\n self.bboxes_3d = None\n self.labels = None\n\n self.sweeps = []\n self.attrs = None"
},
{
"identifier": "SampleMeta",
"path": "paddle3d/sample.py",
"snippet": "class SampleMeta(_EasyDict):\n \"\"\"\n \"\"\"\n # yapf: disable\n __slots__ = [\n \"camera_intrinsic\",\n # bgr or rgb\n \"image_format\",\n # pillow or cv2\n \"image_reader\",\n # chw or hwc\n \"channel_order\",\n # Unique ID of the sample\n \"id\",\n \"time_lag\",\n \"ref_from_curr\"\n ]\n # yapf: enable\n\n def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)"
},
{
"identifier": "load_pretrained_model",
"path": "paddle3d/utils/checkpoint.py",
"snippet": "def load_pretrained_model(model: paddle.nn.Layer,\n pretrained_model: Union[dict, str]):\n \"\"\"\n \"\"\"\n if isinstance(pretrained_model, dict):\n load_pretrained_model_from_state_dict(model, pretrained_model)\n elif isinstance(pretrained_model, str):\n if urlparse(pretrained_model).netloc:\n load_pretrained_model_from_url(model, pretrained_model)\n elif os.path.exists(pretrained_model):\n load_pretrained_model_from_path(model, pretrained_model)\n else:\n raise ValueError(\n '{} is neither a valid path nor a valid URL.'.format(\n pretrained_model))\n else:\n raise TypeError('Unsupported pretrained model type {}'.format(\n type(pretrained_model)))"
},
{
"identifier": "logger",
"path": "paddle3d/utils/logger.py",
"snippet": "class Logger(object):\nclass ProgressBar(object):\n def __init__(self, name: str = None):\n def format(self):\n def disable(self):\n def enable(self):\n def enabled(self) -> bool:\n def __call__(self, log_level: str, msg: str):\n def use_terminator(self, terminator: str):\n def processing(self, msg: str, flush_interval: float = 0.1):\n def _printer():\n def progressbar(self, msg: str, flush_interval: float = 0.1):\n def range(self, stop: int, msg: str):\n def enumerate(self, iterable: Iterable, msg: str):\n def __init__(self, logger: Logger, flush_interval: float = 0.1):\n def update(self, progress: float):"
},
{
"identifier": "dtype2float32",
"path": "paddle3d/utils/amp_utils.py",
"snippet": "def dtype2float32(src_tensors):\n if isinstance(src_tensors,\n paddle.Tensor) and src_tensors.dtype != 'float32':\n return src_tensors.astype('float32')\n elif isinstance(src_tensors, Sequence):\n return type(src_tensors)([dtype2float32(x) for x in src_tensors])\n elif isinstance(src_tensors, Mapping):\n return {key: dtype2float32(x) for key, x in src_tensors.items()}\n return src_tensors"
}
] | import collections
import os
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from copy import deepcopy
from typing import Dict, List
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D
from paddle3d.models.base import BaseLidarModel
from paddle3d.sample import Sample, SampleMeta
from paddle3d.utils.checkpoint import load_pretrained_model
from paddle3d.utils.logger import logger
from paddle3d.utils import dtype2float32 | 5,148 | self.voxel_encoder = voxel_encoder
self.middle_encoder = middle_encoder
self.backbone = backbone
self.neck = neck
self.bbox_head = bbox_head
self.test_cfg = DictObject(test_cfg)
self.sync_bn = True
if pretrained is not None:
            load_pretrained_model(self, pretrained)
self.freeze = freeze
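    # `freeze` may list 'shared_conv' (freezes bbox_head.shared_conv), the name of a
    # top-level sublayer, or a list of task indices into bbox_head.tasks; _freeze()
    # switches each selected layer to eval mode and disables its gradients.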
def _freeze(self):
if len(self.freeze) > 0:
freeze_layers = []
for layer_name in self.freeze:
if layer_name == 'shared_conv':
freeze_layers.append(
getattr(self, 'bbox_head').shared_conv)
elif isinstance(layer_name, str):
freeze_layers.append(getattr(self, layer_name))
elif isinstance(layer_name, list):
for current_layer in layer_name:
freeze_layers.append(
getattr(self, 'bbox_head').tasks[current_layer])
else:
raise NotImplementedError(
'The freeze_layer type {} is not supported'.format(
layer_name))
for freeze_layer in freeze_layers:
                self.freeze_single_layer(freeze_layer)
    def freeze_single_layer(self, layer):
layer.eval()
for param in layer.parameters():
param.trainable = False
for m in layer.sublayers():
if isinstance(m, nn.layer.norm._BatchNormBase):
m.eval()
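    # Export-time input handling: if the incoming points already carry >=5 features,
    # keep the first five; otherwise pad a zero fifth feature, then slice to the number
    # of channels expected by the voxel encoder.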
def deploy_preprocess(self, points):
def true_fn(points):
points = points[:, 0:5]
return points
def false_fn(points):
points = points.reshape([1, -1, 4])
points = F.pad(
points, [0, 1], value=0, mode='constant', data_format="NCL")
points = points.reshape([-1, 5])
return points
points = paddle.static.nn.cond(
points.shape[-1] >=
5, lambda: true_fn(points), lambda: false_fn(points))
return points[:, 0:self.voxel_encoder.in_channels]
def voxelize(self, points):
voxels, coordinates, num_points_in_voxel = self.voxelizer(points)
return voxels, coordinates, num_points_in_voxel
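    # Feature extraction: voxelize the raw points, encode per-voxel features, pass them
    # through the middle encoder, then refine the result with the 2D backbone and neck.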
def extract_feat(self, data):
voxels, coordinates, num_points_in_voxel = self.voxelizer(
data['points'])
data["features"] = voxels
data["num_points_in_voxel"] = num_points_in_voxel
data["coors"] = coordinates
input_features = self.voxel_encoder(
data["features"], data["num_points_in_voxel"], data["coors"])
x = self.middle_encoder(input_features, data["coors"],
data["batch_size"])
x = self.backbone(x)
x = self.neck(x)
return x
def train_forward(self, samples):
if len(self.freeze) > 0:
self._freeze()
batch_size = len(samples["data"])
points = samples["data"]
data = dict(points=points, batch_size=batch_size)
if hasattr(self, 'amp_cfg_'):
with paddle.amp.auto_cast(**self.amp_cfg_):
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
preds = dtype2float32(preds)
else:
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
return self.bbox_head.loss(samples, preds, self.test_cfg)
def test_forward(self, samples):
batch_size = len(samples["data"])
points = samples["data"]
data = dict(points=points, batch_size=batch_size)
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
preds = self.bbox_head.predict(samples, preds, self.test_cfg)
preds = self._parse_results_to_sample(preds, samples)
return {'preds': preds}
def export_forward(self, samples):
batch_size = 1
points = samples["data"]
points = self.deploy_preprocess(points)
data = dict(points=points, batch_size=batch_size)
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
return self.bbox_head.predict_by_custom_op(samples, preds,
self.test_cfg)
def _parse_results_to_sample(self, results: dict, sample: dict):
num_samples = len(results)
new_results = []
for i in range(num_samples):
| # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DictObject(Dict):
def __init__(self, config: Dict):
for key, value in config.items():
if isinstance(value, dict):
setattr(self, key, DictObject(value))
else:
setattr(self, key, value)
@manager.MODELS.add_component
class CenterPoint(BaseLidarModel):
def __init__(self,
voxelizer,
voxel_encoder,
middle_encoder,
backbone,
neck,
bbox_head,
test_cfg=None,
pretrained=None,
box_with_velocity: bool = False,
freeze=[]):
super().__init__(
with_voxelizer=True, box_with_velocity=box_with_velocity)
self.voxelizer = voxelizer
self.voxel_encoder = voxel_encoder
self.middle_encoder = middle_encoder
self.backbone = backbone
self.neck = neck
self.bbox_head = bbox_head
self.test_cfg = DictObject(test_cfg)
self.sync_bn = True
if pretrained is not None:
            load_pretrained_model(self, pretrained)
self.freeze = freeze
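    # `freeze` may list 'shared_conv' (freezes bbox_head.shared_conv), the name of a
    # top-level sublayer, or a list of task indices into bbox_head.tasks; _freeze()
    # switches each selected layer to eval mode and disables its gradients.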
def _freeze(self):
if len(self.freeze) > 0:
freeze_layers = []
for layer_name in self.freeze:
if layer_name == 'shared_conv':
freeze_layers.append(
getattr(self, 'bbox_head').shared_conv)
elif isinstance(layer_name, str):
freeze_layers.append(getattr(self, layer_name))
elif isinstance(layer_name, list):
for current_layer in layer_name:
freeze_layers.append(
getattr(self, 'bbox_head').tasks[current_layer])
else:
raise NotImplementedError(
'The freeze_layer type {} is not supported'.format(
layer_name))
for freeze_layer in freeze_layers:
                self.freeze_single_layer(freeze_layer)
    def freeze_single_layer(self, layer):
layer.eval()
for param in layer.parameters():
param.trainable = False
for m in layer.sublayers():
if isinstance(m, nn.layer.norm._BatchNormBase):
m.eval()
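    # Export-time input handling: if the incoming points already carry >=5 features,
    # keep the first five; otherwise pad a zero fifth feature, then slice to the number
    # of channels expected by the voxel encoder.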
def deploy_preprocess(self, points):
def true_fn(points):
points = points[:, 0:5]
return points
def false_fn(points):
points = points.reshape([1, -1, 4])
points = F.pad(
points, [0, 1], value=0, mode='constant', data_format="NCL")
points = points.reshape([-1, 5])
return points
points = paddle.static.nn.cond(
points.shape[-1] >=
5, lambda: true_fn(points), lambda: false_fn(points))
return points[:, 0:self.voxel_encoder.in_channels]
def voxelize(self, points):
voxels, coordinates, num_points_in_voxel = self.voxelizer(points)
return voxels, coordinates, num_points_in_voxel
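    # Feature extraction: voxelize the raw points, encode per-voxel features, pass them
    # through the middle encoder, then refine the result with the 2D backbone and neck.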
def extract_feat(self, data):
voxels, coordinates, num_points_in_voxel = self.voxelizer(
data['points'])
data["features"] = voxels
data["num_points_in_voxel"] = num_points_in_voxel
data["coors"] = coordinates
input_features = self.voxel_encoder(
data["features"], data["num_points_in_voxel"], data["coors"])
x = self.middle_encoder(input_features, data["coors"],
data["batch_size"])
x = self.backbone(x)
x = self.neck(x)
return x
def train_forward(self, samples):
if len(self.freeze) > 0:
self._freeze()
batch_size = len(samples["data"])
points = samples["data"]
data = dict(points=points, batch_size=batch_size)
if hasattr(self, 'amp_cfg_'):
with paddle.amp.auto_cast(**self.amp_cfg_):
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
preds = dtype2float32(preds)
else:
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
return self.bbox_head.loss(samples, preds, self.test_cfg)
def test_forward(self, samples):
batch_size = len(samples["data"])
points = samples["data"]
data = dict(points=points, batch_size=batch_size)
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
preds = self.bbox_head.predict(samples, preds, self.test_cfg)
preds = self._parse_results_to_sample(preds, samples)
return {'preds': preds}
def export_forward(self, samples):
batch_size = 1
points = samples["data"]
points = self.deploy_preprocess(points)
data = dict(points=points, batch_size=batch_size)
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
return self.bbox_head.predict_by_custom_op(samples, preds,
self.test_cfg)
def _parse_results_to_sample(self, results: dict, sample: dict):
num_samples = len(results)
new_results = []
for i in range(num_samples): | data = Sample(sample["path"][i], sample["modality"][i]) | 3 | 2023-11-08 07:08:03+00:00 | 8k |
JustlfC03/SCUNet-plusplus | test.py | [
{
"identifier": "Synapse_dataset",
"path": "datasets/dataset_synapse.py",
"snippet": "class Synapse_dataset(Dataset):\n def __init__(self, base_dir, list_dir, split, transform=None):\n self.transform = transform\n self.split = split\n self.sample_list = open(os.path.join(list_dir, self.split + '.txt')).readlines()\n self.data_dir = base_dir\n\n def __len__(self):\n return len(self.sample_list)\n\n def __getitem__(self, idx):\n if self.split == \"train\":\n slice_name = self.sample_list[idx].strip('\\n')\n data_path = os.path.join(self.data_dir, slice_name + '.npz')\n data = np.load(data_path)\n image, label = data['image'], data['label']\n else:\n slice_name = self.sample_list[idx].strip('\\n')\n data_path = os.path.join(self.data_dir, slice_name + '.npz')\n data = np.load(data_path)\n image, label = data['image'], data['label']\n # 修改\n # vol_name = self.sample_list[idx].strip('\\n')\n # filepath = self.data_dir + \"/{}.npy.h5\".format(vol_name)\n # data = h5py.File(filepath)\n # image, label = data['image'][:], data['label'][:]\n image = torch.from_numpy(image.astype(np.float32))\n image = image.permute(2, 0, 1)\n label = torch.from_numpy(label.astype(np.float32))\n\n sample = {'image': image, 'label': label}\n if self.transform:\n sample = self.transform(sample)\n sample['case_name'] = self.sample_list[idx].strip('\\n')\n return sample"
},
{
"identifier": "test_single_volume",
"path": "utils.py",
"snippet": "def test_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1):\n image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()\n _, x, y = image.shape\n\n if x != patch_size[0] or y != patch_size[1]:\n image = zoom(image, (1, patch_size[0] / x, patch_size[1] / y), order=3)\n input = torch.from_numpy(image).unsqueeze(0).float().cuda()\n net.eval()\n with torch.no_grad():\n out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0)\n out = out.cpu().detach().numpy()\n if x != patch_size[0] or y != patch_size[1]:\n prediction = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)\n else:\n prediction = out\n metric_list = []\n for i in range(1, classes):\n metric_list.append(calculate_metric_percase(prediction == i, label == i))\n\n # if test_save_path is not None:\n # prediction = Image.fromarray(np.uint8(prediction)).convert('L')\n # prediction.save(test_save_path + '/' + case + '.png')\n\n if test_save_path is not None:\n a1 = copy.deepcopy(prediction)\n a2 = copy.deepcopy(prediction)\n a3 = copy.deepcopy(prediction)\n a1[a1 == 1] = 0\n a2[a2 == 1] = 255\n a3[a3 == 1] = 0\n a1 = Image.fromarray(np.uint8(a1)).convert('L')\n a2 = Image.fromarray(np.uint8(a2)).convert('L')\n a3 = Image.fromarray(np.uint8(a3)).convert('L')\n prediction = Image.merge('RGB', [a1, a2, a3])\n prediction.save(test_save_path + '/' + case + '.png')\n\n return metric_list"
},
{
"identifier": "SwinUnet",
"path": "networks/vision_transformer.py",
"snippet": "class SwinUnet(nn.Module):\n def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):\n super(SwinUnet, self).__init__()\n self.num_classes = num_classes\n self.zero_head = zero_head\n self.config = config\n\n self.swin_unet = SwinTransformerSys(img_size=config.DATA.IMG_SIZE,\n patch_size=config.MODEL.SWIN.PATCH_SIZE,\n in_chans=config.MODEL.SWIN.IN_CHANS,\n num_classes=self.num_classes,\n embed_dim=config.MODEL.SWIN.EMBED_DIM,\n depths=config.MODEL.SWIN.DEPTHS,\n num_heads=config.MODEL.SWIN.NUM_HEADS,\n window_size=config.MODEL.SWIN.WINDOW_SIZE,\n mlp_ratio=config.MODEL.SWIN.MLP_RATIO,\n qkv_bias=config.MODEL.SWIN.QKV_BIAS,\n qk_scale=config.MODEL.SWIN.QK_SCALE,\n drop_rate=config.MODEL.DROP_RATE,\n drop_path_rate=config.MODEL.DROP_PATH_RATE,\n ape=config.MODEL.SWIN.APE,\n patch_norm=config.MODEL.SWIN.PATCH_NORM,\n use_checkpoint=config.TRAIN.USE_CHECKPOINT)\n\n def forward(self, x):\n if x.size()[1] == 1:\n x = x.repeat(1, 3, 1, 1)\n logits = self.swin_unet(x)\n return logits\n\n def load_from(self, config):\n pretrained_path = config.MODEL.PRETRAIN_CKPT\n if pretrained_path is not None:\n print(\"pretrained_path:{}\".format(pretrained_path))\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n pretrained_dict = torch.load(pretrained_path, map_location=device)\n if \"model\" not in pretrained_dict:\n print(\"---start load pretrained modle by splitting---\")\n pretrained_dict = {k[17:]: v for k, v in pretrained_dict.items()}\n for k in list(pretrained_dict.keys()):\n if \"output\" in k:\n print(\"delete key:{}\".format(k))\n del pretrained_dict[k]\n msg = self.swin_unet.load_state_dict(pretrained_dict, strict=False)\n # print(msg)\n return\n pretrained_dict = pretrained_dict['model']\n print(\"---start load pretrained modle of swin encoder---\")\n\n model_dict = self.swin_unet.state_dict()\n full_dict = copy.deepcopy(pretrained_dict)\n for k, v in pretrained_dict.items():\n if \"layers.\" in k:\n current_layer_num = 3 - int(k[7:8])\n current_k = \"layers_up.\" + str(current_layer_num) + k[8:]\n full_dict.update({current_k: v})\n for k in list(full_dict.keys()):\n if k in model_dict:\n if full_dict[k].shape != model_dict[k].shape:\n print(\"delete:{};shape pretrain:{};shape model:{}\".format(k, v.shape, model_dict[k].shape))\n del full_dict[k]\n\n msg = self.swin_unet.load_state_dict(full_dict, strict=False)\n # print(msg)\n else:\n print(\"none pretrain\")"
},
{
"identifier": "trainer_synapse",
"path": "trainer.py",
"snippet": "def trainer_synapse(args, model, snapshot_path):\n from datasets.dataset_synapse import Synapse_dataset, RandomGenerator\n logging.basicConfig(filename=snapshot_path + \"/log.txt\", level=logging.INFO,\n format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n logging.info(str(args))\n base_lr = args.base_lr\n num_classes = args.num_classes\n batch_size = args.batch_size * args.n_gpu\n max_iterations = args.max_iterations\n db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split=\"train\",\n transform=transforms.Compose(\n [RandomGenerator(output_size=[args.img_size, args.img_size])]))\n print(\"The length of train set is: {}\".format(len(db_train)))\n\n def worker_init_fn(worker_id):\n random.seed(args.seed + worker_id)\n\n trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True,\n worker_init_fn=worker_init_fn)\n if args.n_gpu > 1:\n model = nn.DataParallel(model)\n model.train()\n ce_loss = CrossEntropyLoss()\n dice_loss = DiceLoss(num_classes)\n optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)\n writer = SummaryWriter(snapshot_path + '/log')\n iter_num = 0\n max_epoch = args.max_epochs\n max_iterations = args.max_epochs * len(trainloader)\n logging.info(\"{} iterations per epoch. {} max iterations \".format(len(trainloader), max_iterations))\n best_performance = 0.0\n iterator = tqdm(range(max_epoch), ncols=70)\n for epoch_num in iterator:\n for i_batch, sampled_batch in enumerate(trainloader):\n image_batch, label_batch = sampled_batch['image'], sampled_batch['label']\n image_batch, label_batch = image_batch.cuda(), label_batch.cuda()\n outputs = model(image_batch)\n loss_ce = ce_loss(outputs, label_batch[:].long())\n loss_dice = dice_loss(outputs, label_batch, softmax=True)\n loss = 0.4 * loss_ce + 0.6 * loss_dice\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_\n\n iter_num = iter_num + 1\n writer.add_scalar('info/lr', lr_, iter_num)\n writer.add_scalar('info/total_loss', loss, iter_num)\n writer.add_scalar('info/loss_ce', loss_ce, iter_num)\n\n logging.info('iteration %d : loss : %f, loss_ce: %f' % (iter_num, loss.item(), loss_ce.item()))\n\n if iter_num % 20 == 0:\n image = image_batch[1, 0:1, :, :]\n image = (image - image.min()) / (image.max() - image.min())\n writer.add_image('train/Image', image, iter_num)\n outputs = torch.argmax(torch.softmax(outputs, dim=1), dim=1, keepdim=True)\n writer.add_image('train/Prediction', outputs[1, ...] 
* 50, iter_num)\n labs = label_batch[1, ...].unsqueeze(0) * 50\n writer.add_image('train/GroundTruth', labs, iter_num)\n\n # save_interval = 50\n # if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0:\n # save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')\n # torch.save(model.state_dict(), save_mode_path)\n # logging.info(\"save model to {}\".format(save_mode_path))\n #\n # if epoch_num >= max_epoch - 1:\n # save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')\n # torch.save(model.state_dict(), save_mode_path)\n # logging.info(\"save model to {}\".format(save_mode_path))\n # iterator.close()\n # break\n\n save_interval = 2\n if (epoch_num + 1) % save_interval == 0:\n save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')\n torch.save(model.state_dict(), save_mode_path)\n logging.info(\"save model to {}\".format(save_mode_path))\n\n if epoch_num >= max_epoch - 1:\n save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')\n torch.save(model.state_dict(), save_mode_path)\n logging.info(\"save model to {}\".format(save_mode_path))\n iterator.close()\n break\n\n writer.close()\n return \"Training Finished!\""
},
{
"identifier": "get_config",
"path": "config.py",
"snippet": "def get_config(args):\n \"\"\"Get a yacs CfgNode object with default values.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n config = _C.clone()\n update_config(config, args)\n\n return config"
}
] | import argparse
import logging
import os
import random
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets.dataset_synapse import Synapse_dataset
from utils import test_single_volume
from networks.vision_transformer import SwinUnet as ViT_seg
from trainer import trainer_synapse
from config import get_config | 3,941 |
"""
--dataset Synapse
--cfg ./configs/swin_tiny_patch4_window7_224_lite.yaml
--is_savenii
--volume_path ./datasets/Synapse
--output_dir ./output
--max_epochs 150
--base_lr 0.05
--img_size 224
--batch_size 1
"""
parser = argparse.ArgumentParser()
parser.add_argument('--volume_path', type=str,
default='./datasets/Synapse/test_vol_h5',
help='root dir for validation volume data')
parser.add_argument('--dataset', type=str,
default='Synapse', help='experiment_name')
# parser.add_argument('--num_classes', type=int,
# default=9, help='output channel of network')
parser.add_argument('--num_classes', type=int,
default=2, help='output channel of network')
parser.add_argument('--list_dir', type=str,
default='./lists/lists_Synapse', help='list dir')
parser.add_argument('--output_dir', default='./output', type=str, help='output dir')
parser.add_argument('--max_iterations', type=int, default=30000, help='maximum epoch number to train')
parser.add_argument('--max_epochs', type=int, default=150, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=1,
help='batch_size per gpu')
parser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')
parser.add_argument('--is_savenii', action="store_true", help='whether to save results during inference')
parser.add_argument('--test_save_dir', type=str, default='../predictions', help='saving prediction as nii!')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01, help='segmentation network learning rate')
parser.add_argument('--seed', type=int, default=1234, help='random seed')
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
args = parser.parse_args()
if args.dataset == "Synapse":
args.volume_path = os.path.join(args.volume_path, "test_vol_h5")
# print(args.volume_path)
config = get_config(args)
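# Evaluation loop: each test volume is rescaled and predicted inside test_single_volume,
# and its per-class metrics are accumulated in metric_list.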
def inference(args, model, test_save_path=None):
db_test = args.Dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir)
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
logging.info("{} test iterations per epoch".format(len(testloader)))
model.eval()
metric_list = 0.0
f = open(r'G:\FINAL\SCUNet++\lists\lists_Synapse\testxg.txt', 'w')
for i_batch, sampled_batch in tqdm(enumerate(testloader)):
h, w = sampled_batch["image"].size()[2:]
image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0]
|
"""
--dataset Synapse
--cfg ./configs/swin_tiny_patch4_window7_224_lite.yaml
--is_savenii
--volume_path ./datasets/Synapse
--output_dir ./output
--max_epochs 150
--base_lr 0.05
--img_size 224
--batch_size 1
"""
parser = argparse.ArgumentParser()
parser.add_argument('--volume_path', type=str,
default='./datasets/Synapse/test_vol_h5',
help='root dir for validation volume data')
parser.add_argument('--dataset', type=str,
default='Synapse', help='experiment_name')
# parser.add_argument('--num_classes', type=int,
# default=9, help='output channel of network')
parser.add_argument('--num_classes', type=int,
default=2, help='output channel of network')
parser.add_argument('--list_dir', type=str,
default='./lists/lists_Synapse', help='list dir')
parser.add_argument('--output_dir', default='./output', type=str, help='output dir')
parser.add_argument('--max_iterations', type=int, default=30000, help='maximum epoch number to train')
parser.add_argument('--max_epochs', type=int, default=150, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=1,
help='batch_size per gpu')
parser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')
parser.add_argument('--is_savenii', action="store_true", help='whether to save results during inference')
parser.add_argument('--test_save_dir', type=str, default='../predictions', help='saving prediction as nii!')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01, help='segmentation network learning rate')
parser.add_argument('--seed', type=int, default=1234, help='random seed')
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
args = parser.parse_args()
if args.dataset == "Synapse":
args.volume_path = os.path.join(args.volume_path, "test_vol_h5")
# print(args.volume_path)
config = get_config(args)
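# Evaluation loop: each test volume is rescaled and predicted inside test_single_volume,
# and its per-class metrics are accumulated in metric_list.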
def inference(args, model, test_save_path=None):
db_test = args.Dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir)
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
logging.info("{} test iterations per epoch".format(len(testloader)))
model.eval()
metric_list = 0.0
f = open(r'G:\FINAL\SCUNet++\lists\lists_Synapse\testxg.txt', 'w')
for i_batch, sampled_batch in tqdm(enumerate(testloader)):
h, w = sampled_batch["image"].size()[2:]
image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0] | metric_i = test_single_volume(image, label, model, classes=args.num_classes, | 1 | 2023-11-04 11:42:02+00:00 | 8k |
corcel-api/cortex.t | miner/miner.py | [
{
"identifier": "Embeddings",
"path": "template/protocol.py",
"snippet": "class Embeddings( bt.Synapse):\n \"\"\" A class to represent the embeddings request and response. \"\"\"\n\n texts: List[str] = pydantic.Field(\n ...,\n title=\"Text\",\n description=\"The list of input texts for which embeddings are to be generated.\"\n )\n\n model: str = pydantic.Field(\n default=\"text-embedding-ada-002\",\n title=\"Model\",\n description=\"The model used for generating embeddings.\"\n )\n\n embeddings: Optional[List[List[float]]] = pydantic.Field(\n None,\n title=\"Embeddings\",\n description=\"The resulting list of embeddings, each corresponding to an input text.\"\n )"
},
{
"identifier": "ImageResponse",
"path": "template/protocol.py",
"snippet": "class ImageResponse(bt.Synapse):\n \"\"\" A class to represent the response for an image-related request. \"\"\"\n # https://platform.stability.ai/docs/api-reference#tag/v1generation/operation/textToImage\n\n completion: Optional[Dict] = pydantic.Field(\n None,\n title=\"Completion\",\n description=\"The completion data of the image response.\"\n )\n\n messages: str = pydantic.Field(\n ...,\n title=\"Messages\",\n description=\"Messages related to the image response.\"\n )\n\n provider: str = pydantic.Field(\n default=\"OpenAI\",\n title=\"Provider\",\n description=\"The provider to use when calling for your response.\"\n )\n\n seed: int = pydantic.Field(\n default=1234,\n title=\"Seed\",\n description=\"The seed that which to generate the image with\"\n )\n\n samples: int = pydantic.Field(\n default=1,\n title=\"Samples\",\n description=\"The number of samples to generate\"\n )\n\n cfg_scale: float = pydantic.Field(\n default=8.0,\n title=\"cfg_scale\",\n description=\"The cfg_scale to use for image generation\"\n )\n\n # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m, k_dpmpp_sde)\n sampler: str = pydantic.Field(\n default=\"\",\n title=\"Sampler\",\n description=\"The sampler to use for image generation\"\n )\n\n steps: int = pydantic.Field(\n default=30,\n title=\"Seed\",\n description=\"The steps to take in generating the image\"\n )\n\n model: str = pydantic.Field(\n default=\"dall-e-2\",\n title=\"Model\",\n description=\"The model used for generating the image.\"\n )\n\n style: str = pydantic.Field(\n default=\"vivid\",\n title=\"Style\",\n description=\"The style of the image.\"\n )\n\n size: str = pydantic.Field(\n default=\"1024x1024\",\n title=\"The size of the image, used for Openai generation. Options are 1024x1024, 1792x1024, 1024x1792 for dalle3\",\n description=\"The size of the image.\"\n )\n\n height: int = pydantic.Field(\n default=1024,\n title=\"Height used for non Openai images\",\n description=\"height\"\n )\n\n width: int = pydantic.Field(\n default=1024,\n title=\"Width used for non Openai images\",\n description=\"width\"\n )\n\n quality: str = pydantic.Field(\n default=\"standard\",\n title=\"Quality\",\n description=\"The quality of the image.\"\n )\n\n required_hash_fields: List[str] = pydantic.Field(\n [\"messages\"],\n title=\"Required Hash Fields\",\n description=\"A list of fields required for the hash.\"\n )\n\n def deserialize(self) -> Optional[Dict]:\n \"\"\" Deserialize the completion data of the image response. \"\"\"\n return self.completion"
},
{
"identifier": "IsAlive",
"path": "template/protocol.py",
"snippet": "class IsAlive( bt.Synapse ):\n answer: Optional[str] = None\n completion: str = pydantic.Field(\n \"\",\n title=\"Completion\",\n description=\"Completion status of the current StreamPrompting object. \"\n \"This attribute is mutable and can be updated.\",\n )"
},
{
"identifier": "StreamPrompting",
"path": "template/protocol.py",
"snippet": "class StreamPrompting(bt.StreamingSynapse):\n\n messages: List[Dict[str, str]] = pydantic.Field(\n ...,\n title=\"Messages\",\n description=\"A list of messages in the StreamPrompting scenario, \"\n \"each containing a role and content. Immutable.\",\n allow_mutation=False,\n )\n\n required_hash_fields: List[str] = pydantic.Field(\n [\"messages\"],\n title=\"Required Hash Fields\",\n description=\"A list of required fields for the hash.\",\n allow_mutation=False,\n )\n\n seed: int = pydantic.Field(\n default=\"1234\",\n title=\"Seed\",\n description=\"Seed for text generation. This attribute is immutable and cannot be updated.\",\n )\n\n temperature: float = pydantic.Field(\n default=0.0001,\n title=\"Temperature\",\n description=\"Temperature for text generation. \"\n \"This attribute is immutable and cannot be updated.\",\n )\n\n max_tokens: int = pydantic.Field(\n default=2048,\n title=\"Max Tokens\",\n description=\"Max tokens for text generation. \"\n \"This attribute is immutable and cannot be updated.\",\n )\n\n top_p: float = pydantic.Field(\n default=0.001,\n title=\"Top_p\",\n description=\"Top_p for text generation. The sampler will pick one of \"\n \"the top p percent tokens in the logit distirbution. \"\n \"This attribute is immutable and cannot be updated.\",\n )\n\n top_k: int = pydantic.Field(\n default=1,\n title=\"Top_k\",\n description=\"Top_k for text generation. Sampler will pick one of \"\n \"the k most probablistic tokens in the logit distribtion. \"\n \"This attribute is immutable and cannot be updated.\",\n )\n\n completion: str = pydantic.Field(\n None,\n title=\"Completion\",\n description=\"Completion status of the current StreamPrompting object. \"\n \"This attribute is mutable and can be updated.\",\n )\n\n provider: str = pydantic.Field(\n default=\"OpenAI\",\n title=\"Provider\",\n description=\"The provider to use when calling for your response.\"\n )\n\n model: str = pydantic.Field(\n default=\"gpt-3.5-turbo\",\n title=\"model\",\n description=\"The model to use when calling provider for your response.\",\n )\n\n async def process_streaming_response(self, response: StreamingResponse) -> AsyncIterator[str]:\n if self.completion is None:\n self.completion = \"\"\n async for chunk in response.content.iter_any():\n tokens = chunk.decode(\"utf-8\")\n for token in tokens:\n if token:\n self.completion += token\n yield tokens\n\n def deserialize(self) -> str:\n return self.completion\n\n def extract_response_json(self, response: StreamingResponse) -> dict:\n headers = {\n k.decode(\"utf-8\"): v.decode(\"utf-8\")\n for k, v in response.__dict__[\"_raw_headers\"]\n }\n\n def extract_info(prefix: str) -> dict[str, str]:\n return {\n key.split(\"_\")[-1]: value\n for key, value in headers.items()\n if key.startswith(prefix)\n }\n\n return {\n \"name\": headers.get(\"name\", \"\"),\n \"timeout\": float(headers.get(\"timeout\", 0)),\n \"total_size\": int(headers.get(\"total_size\", 0)),\n \"header_size\": int(headers.get(\"header_size\", 0)),\n \"dendrite\": extract_info(\"bt_header_dendrite\"),\n \"axon\": extract_info(\"bt_header_axon\"),\n \"messages\": self.messages,\n \"completion\": self.completion,\n }"
},
{
"identifier": "get_version",
"path": "template/utils.py",
"snippet": "def get_version(line_number: int = 22) -> Optional[str]:\n url = \"https://api.github.com/repos/corcel-api/cortex.t/contents/template/__init__.py\"\n response = requests.get(url, timeout=10)\n if not response.ok:\n bt.logging.error(\"github api call failed\")\n return None\n\n content = response.json()['content']\n decoded_content = base64.b64decode(content).decode('utf-8')\n lines = decoded_content.split('\\n')\n if line_number > len(lines):\n raise Exception(\"Line number exceeds file length\")\n\n version_line = lines[line_number - 1]\n version_match = re.search(r'__version__ = \"(.*?)\"', version_line)\n if not version_match:\n raise Exception(\"Version information not found in the specified line\")\n\n return version_match.group(1)"
}
] | import base # noqa
import argparse
import asyncio
import copy
import json
import os
import io
import base64
import boto3
import pathlib
import threading
import time
import requests
import traceback
import requests
import anthropic
import bittensor as bt
import wandb
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
import template
import sys
from abc import ABC, abstractmethod
from collections import deque
from functools import partial
from typing import Tuple
from stability_sdk import client
from config import check_config, get_config
from openai import AsyncOpenAI, OpenAI
from PIL import Image
from anthropic_bedrock import AsyncAnthropicBedrock, HUMAN_PROMPT, AI_PROMPT, AnthropicBedrock
from template.protocol import Embeddings, ImageResponse, IsAlive, StreamPrompting
from template.utils import get_version
from starlette.types import Send | 4,033 | bt.logging.info(
f"Running miner for subnet: {self.config.netuid} "
f"on network: {self.subtensor.chain_endpoint} with config:"
)
# metagraph provides the network's current state, holding state about other participants in a subnet.
self.metagraph = self.subtensor.metagraph(self.config.netuid)
bt.logging.info(f"Metagraph: {self.metagraph}")
if self.wallet.hotkey.ss58_address not in self.metagraph.hotkeys:
bt.logging.error(
f"\nYour validator: {self.wallet} if not registered to chain connection: {self.subtensor} "
f"\nRun btcli register and try again. "
)
sys.exit()
else:
# Each miner gets a unique identity (UID) in the network for differentiation.
self.my_subnet_uid = self.metagraph.hotkeys.index(
self.wallet.hotkey.ss58_address
)
bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}")
# The axon handles request processing, allowing validators to send this process requests.
self.axon = axon or bt.axon(wallet=self.wallet, port=self.config.axon.port)
# Attach determiners which functions are called when servicing a request.
bt.logging.info("Attaching forward function to axon.")
print(f"Attaching forward function to axon. {self._prompt}")
self.axon.attach(
forward_fn=self._prompt,
blacklist_fn=self.blacklist_prompt,
).attach(
forward_fn=self._is_alive,
blacklist_fn=self.blacklist_is_alive,
).attach(
forward_fn=self._images,
blacklist_fn=self.blacklist_images,
).attach(
forward_fn=self._embeddings,
blacklist_fn=self.blacklist_embeddings,
)
bt.logging.info(f"Axon created: {self.axon}")
# Instantiate runners
self.should_exit: bool = False
self.is_running: bool = False
self.thread: threading.Thread = None
self.lock = asyncio.Lock()
self.request_timestamps: dict = {}
thread = threading.Thread(target=get_valid_hotkeys, args=(self.config,))
# thread.start()
@abstractmethod
def config(self) -> bt.config:
...
def _prompt(self, synapse: StreamPrompting) -> StreamPrompting:
return self.prompt(synapse)
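    # Shared gate for all synapse types: whitelist bypass, valid-hotkey and registration
    # checks, a minimum-stake requirement, and a per-hotkey sliding-window request limit.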
def base_blacklist(self, synapse, blacklist_amt = 20000) -> Tuple[bool, str]:
try:
hotkey = synapse.dendrite.hotkey
synapse_type = type(synapse).__name__
if hotkey in template.WHITELISTED_KEYS:
return False, f"accepting {synapse_type} request from {hotkey}"
if hotkey not in valid_hotkeys:
return True, f"Blacklisted a {synapse_type} request from a non-valid hotkey: {hotkey}"
uid = None
for uid, _axon in enumerate(self.metagraph.axons): # noqa: B007
if _axon.hotkey == hotkey:
break
if uid is None and template.ALLOW_NON_REGISTERED is False:
return True, f"Blacklisted a non registered hotkey's {synapse_type} request from {hotkey}"
# check the stake
tao = self.metagraph.neurons[uid].stake.tao
# metagraph.neurons[uid].S
if tao < blacklist_amt:
return True, f"Blacklisted a low stake {synapse_type} request: {tao} < {blacklist_amt} from {hotkey}"
time_window = template.MIN_REQUEST_PERIOD * 60
current_time = time.time()
if hotkey not in self.request_timestamps:
self.request_timestamps[hotkey] = deque()
# Remove timestamps outside the current time window
while self.request_timestamps[hotkey] and current_time - self.request_timestamps[hotkey][0] > time_window:
self.request_timestamps[hotkey].popleft()
# Check if the number of requests exceeds the limit
if len(self.request_timestamps[hotkey]) >= template.MAX_REQUESTS:
return (
True,
f"Request frequency for {hotkey} exceeded: "
f"{len(self.request_timestamps[hotkey])} requests in {template.MIN_REQUEST_PERIOD} minutes. "
f"Limit is {template.MAX_REQUESTS} requests."
)
self.request_timestamps[hotkey].append(current_time)
return False, f"accepting {synapse_type} request from {hotkey}"
except Exception:
bt.logging.error(f"errror in blacklist {traceback.format_exc()}")
def blacklist_prompt( self, synapse: StreamPrompting ) -> Tuple[bool, str]:
blacklist = self.base_blacklist(synapse, template.PROMPT_BLACKLIST_STAKE)
bt.logging.info(blacklist[1])
return blacklist
def blacklist_is_alive( self, synapse: IsAlive ) -> Tuple[bool, str]:
blacklist = self.base_blacklist(synapse, template.ISALIVE_BLACKLIST_STAKE)
bt.logging.debug(blacklist[1])
return blacklist
|
OpenAI.api_key = os.environ.get("OPENAI_API_KEY")
if not OpenAI.api_key:
raise ValueError("Please set the OPENAI_API_KEY environment variable.")
stability_api = client.StabilityInference(
key=os.environ['STABILITY_KEY'],
verbose=True,
engine="stable-diffusion-xl-1024-v1-0"
)
api_key = os.environ.get("ANTHROPIC_API_KEY")
bedrock_client = AsyncAnthropicBedrock(
# default is 10 minutes
# more granular timeout options: timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
timeout=60.0,
)
anthropic_client = anthropic.Anthropic()
anthropic_client.api_key = api_key
netrc_path = pathlib.Path.home() / ".netrc"
wandb_api_key = os.getenv("WANDB_API_KEY")
bt.logging.info("WANDB_API_KEY is set")
bt.logging.info("~/.netrc exists:", netrc_path.exists())
if not wandb_api_key and not netrc_path.exists():
raise ValueError("Please log in to wandb using `wandb login` or set the WANDB_API_KEY environment variable.")
client = AsyncOpenAI(timeout=60.0)
valid_hotkeys = []
class StreamMiner(ABC):
def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
bt.logging.info("starting stream miner")
base_config = copy.deepcopy(config or get_config())
self.config = self.config()
self.config.merge(base_config)
check_config(StreamMiner, self.config)
bt.logging.info(self.config) # TODO: duplicate print?
self.prompt_cache: dict[str, Tuple[str, int]] = {}
self.request_timestamps = {}
# Activating Bittensor's logging with the set configurations.
bt.logging(config=self.config, logging_dir=self.config.full_path)
bt.logging.info("Setting up bittensor objects.")
# Wallet holds cryptographic information, ensuring secure transactions and communication.
self.wallet = wallet or bt.wallet(config=self.config)
bt.logging.info(f"Wallet {self.wallet}")
# subtensor manages the blockchain connection, facilitating interaction with the Bittensor blockchain.
self.subtensor = subtensor or bt.subtensor(config=self.config)
bt.logging.info(f"Subtensor: {self.subtensor}")
bt.logging.info(
f"Running miner for subnet: {self.config.netuid} "
f"on network: {self.subtensor.chain_endpoint} with config:"
)
# metagraph provides the network's current state, holding state about other participants in a subnet.
self.metagraph = self.subtensor.metagraph(self.config.netuid)
bt.logging.info(f"Metagraph: {self.metagraph}")
if self.wallet.hotkey.ss58_address not in self.metagraph.hotkeys:
bt.logging.error(
f"\nYour validator: {self.wallet} if not registered to chain connection: {self.subtensor} "
f"\nRun btcli register and try again. "
)
sys.exit()
else:
# Each miner gets a unique identity (UID) in the network for differentiation.
self.my_subnet_uid = self.metagraph.hotkeys.index(
self.wallet.hotkey.ss58_address
)
bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}")
# The axon handles request processing, allowing validators to send this process requests.
self.axon = axon or bt.axon(wallet=self.wallet, port=self.config.axon.port)
# Attach determiners which functions are called when servicing a request.
bt.logging.info("Attaching forward function to axon.")
print(f"Attaching forward function to axon. {self._prompt}")
self.axon.attach(
forward_fn=self._prompt,
blacklist_fn=self.blacklist_prompt,
).attach(
forward_fn=self._is_alive,
blacklist_fn=self.blacklist_is_alive,
).attach(
forward_fn=self._images,
blacklist_fn=self.blacklist_images,
).attach(
forward_fn=self._embeddings,
blacklist_fn=self.blacklist_embeddings,
)
bt.logging.info(f"Axon created: {self.axon}")
# Instantiate runners
self.should_exit: bool = False
self.is_running: bool = False
self.thread: threading.Thread = None
self.lock = asyncio.Lock()
self.request_timestamps: dict = {}
thread = threading.Thread(target=get_valid_hotkeys, args=(self.config,))
# thread.start()
@abstractmethod
def config(self) -> bt.config:
...
def _prompt(self, synapse: StreamPrompting) -> StreamPrompting:
return self.prompt(synapse)
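    # Shared gate for all synapse types: whitelist bypass, valid-hotkey and registration
    # checks, a minimum-stake requirement, and a per-hotkey sliding-window request limit.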
def base_blacklist(self, synapse, blacklist_amt = 20000) -> Tuple[bool, str]:
try:
hotkey = synapse.dendrite.hotkey
synapse_type = type(synapse).__name__
if hotkey in template.WHITELISTED_KEYS:
return False, f"accepting {synapse_type} request from {hotkey}"
if hotkey not in valid_hotkeys:
return True, f"Blacklisted a {synapse_type} request from a non-valid hotkey: {hotkey}"
uid = None
for uid, _axon in enumerate(self.metagraph.axons): # noqa: B007
if _axon.hotkey == hotkey:
break
if uid is None and template.ALLOW_NON_REGISTERED is False:
return True, f"Blacklisted a non registered hotkey's {synapse_type} request from {hotkey}"
# check the stake
tao = self.metagraph.neurons[uid].stake.tao
# metagraph.neurons[uid].S
if tao < blacklist_amt:
return True, f"Blacklisted a low stake {synapse_type} request: {tao} < {blacklist_amt} from {hotkey}"
time_window = template.MIN_REQUEST_PERIOD * 60
current_time = time.time()
if hotkey not in self.request_timestamps:
self.request_timestamps[hotkey] = deque()
# Remove timestamps outside the current time window
while self.request_timestamps[hotkey] and current_time - self.request_timestamps[hotkey][0] > time_window:
self.request_timestamps[hotkey].popleft()
# Check if the number of requests exceeds the limit
if len(self.request_timestamps[hotkey]) >= template.MAX_REQUESTS:
return (
True,
f"Request frequency for {hotkey} exceeded: "
f"{len(self.request_timestamps[hotkey])} requests in {template.MIN_REQUEST_PERIOD} minutes. "
f"Limit is {template.MAX_REQUESTS} requests."
)
self.request_timestamps[hotkey].append(current_time)
return False, f"accepting {synapse_type} request from {hotkey}"
except Exception:
bt.logging.error(f"errror in blacklist {traceback.format_exc()}")
def blacklist_prompt( self, synapse: StreamPrompting ) -> Tuple[bool, str]:
blacklist = self.base_blacklist(synapse, template.PROMPT_BLACKLIST_STAKE)
bt.logging.info(blacklist[1])
return blacklist
def blacklist_is_alive( self, synapse: IsAlive ) -> Tuple[bool, str]:
blacklist = self.base_blacklist(synapse, template.ISALIVE_BLACKLIST_STAKE)
bt.logging.debug(blacklist[1])
return blacklist
| def blacklist_images( self, synapse: ImageResponse ) -> Tuple[bool, str]: | 1 | 2023-11-06 10:35:34+00:00 | 8k |
ljy0ustc/LLaRA | main.py | [
{
"identifier": "MInterface",
"path": "model/model_interface.py",
"snippet": "class MInterface(pl.LightningModule):\n def __init__(self, \n **kargs):\n super().__init__()\n self.save_hyperparameters()\n self.load_llm(self.hparams.llm_path)\n self.load_rec_model(self.hparams.rec_model_path)\n self.load_projector()\n\n def forward(self, batch):\n targets = batch[\"tokens\"].input_ids.masked_fill(\n batch[\"tokens\"].input_ids == self.llama_tokenizer.pad_token_id, -100\n ) # [batch_size, max_len]\n targets = targets.masked_fill((batch[\"tokens\"].token_type_ids == 0)[:,1:], -100)\n input_embeds = self.wrap_emb(batch)\n outputs = self.llama_model(\n inputs_embeds=input_embeds,\n attention_mask=batch[\"tokens\"].attention_mask,\n return_dict=True,\n labels=targets,\n use_cache=False\n )\n return outputs\n\n def generate(self, batch,temperature=0.8,do_sample=False,num_beams=1,max_gen_length=64,min_gen_length=1,repetition_penalty=1.0,length_penalty=1.0, num_return_sequences=1):\n input_embeds = self.wrap_emb(batch)\n generate_ids = self.llama_model.generate(\n inputs_embeds=input_embeds,\n attention_mask=batch[\"tokens\"].attention_mask,\n temperature=temperature,\n do_sample=do_sample,\n num_beams=num_beams,\n max_new_tokens=max_gen_length,\n min_new_tokens=min_gen_length,\n pad_token_id=self.llama_tokenizer.pad_token_id,\n repetition_penalty=repetition_penalty,\n length_penalty=length_penalty,\n num_return_sequences=num_return_sequences\n )\n output_text=self.llama_tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)\n outputs=[text.strip() for text in output_text]\n return outputs\n\n def training_step(self, batch, batch_idx):\n if self.scheduler:\n self.scheduler.step(self.trainer.global_step, self.current_epoch, self.trainer.max_steps)\n if batch[\"flag\"]:\n for name, param in self.projector.named_parameters():\n param.requires_grad = False\n else:\n for name, param in self.projector.named_parameters():\n param.requires_grad = True\n out = self(batch)\n loss = self.configure_loss(out)\n self.log('loss', loss, on_step=True, on_epoch=True, prog_bar=True)\n self.log('lr', self.scheduler.optimizer.param_groups[0]['lr'], on_step=True, on_epoch=True, prog_bar=True)\n self.log('global_step_num', self.trainer.global_step, on_step=True, on_epoch=True, prog_bar=True)\n return loss\n \n def on_validation_epoch_start(self):\n self.val_content={\n \"generate\":[],\n \"real\":[],\n \"cans\":[],\n }\n\n @torch.no_grad()\n def validation_step(self, batch, batch_idx):\n generate_output = self.generate(batch)\n output=[]\n for i,generate in enumerate(generate_output):\n real=batch['correct_answer'][i]\n cans=batch['cans_name'][i]\n generate=generate.strip().split(\"\\n\")[0]\n output.append((generate,real,cans))\n return output\n\n def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx):\n for generate,real,cans in outputs:\n self.val_content[\"generate\"].append(generate)\n self.val_content[\"real\"].append(real)\n self.val_content[\"cans\"].append(cans)\n\n def on_validation_epoch_end(self):\n df=DataFrame(self.val_content)\n if not os.path.exists(self.hparams.output_dir):\n os.makedirs(self.hparams.output_dir)\n df.to_csv(op.join(self.hparams.output_dir, 'valid.csv'))\n prediction_valid_ratio,hr=self.calculate_hr1(self.val_content)\n metric=hr*prediction_valid_ratio\n self.log('val_prediction_valid', prediction_valid_ratio, on_step=False, on_epoch=True, prog_bar=True)\n self.log('val_hr', hr, on_step=False, on_epoch=True, prog_bar=True)\n self.log('metric', metric, on_step=False, on_epoch=True, 
prog_bar=True)\n\n def on_test_epoch_start(self):\n self.test_content={\n \"generate\":[],\n \"real\":[],\n \"cans\":[],\n }\n\n @torch.no_grad()\n def test_step(self, batch, batch_idx):\n generate_output = self.generate(batch)\n output=[]\n for i,generate in enumerate(generate_output):\n real=batch['correct_answer'][i]\n cans=batch['cans_name'][i]\n generate=generate.strip().split(\"\\n\")[0]\n output.append((generate,real,cans))\n return output\n \n def on_test_batch_end(self, outputs, batch, batch_idx, dataloader_idx):\n for generate,real,cans in outputs:\n self.test_content[\"generate\"].append(generate)\n self.test_content[\"real\"].append(real)\n self.test_content[\"cans\"].append(cans)\n\n def on_test_epoch_end(self):\n df=DataFrame(self.test_content)\n if not os.path.exists(self.hparams.output_dir):\n os.makedirs(self.hparams.output_dir)\n df.to_csv(op.join(self.hparams.output_dir, 'test.csv'))\n prediction_valid_ratio,hr=self.calculate_hr1(self.test_content)\n metric=hr*prediction_valid_ratio\n self.log('test_prediction_valid', prediction_valid_ratio, on_step=False, on_epoch=True, prog_bar=True)\n self.log('test_hr', hr, on_step=False, on_epoch=True, prog_bar=True)\n self.log('metric', metric, on_step=False, on_epoch=True, prog_bar=True)\n\n def configure_optimizers(self):\n if hasattr(self.hparams, 'weight_decay'):\n weight_decay = self.hparams.weight_decay\n else:\n weight_decay = 0\n optimizer = torch.optim.Adam([\n {'params': self.projector.parameters(), 'lr': self.hparams.lr, 'weight_decay':weight_decay},\n {'params': self.llama_model.parameters(), 'lr': self.hparams.lr}\n ])\n\n if self.hparams.lr_scheduler is None:\n return optimizer\n else:\n max_step = self.trainer.max_steps\n warmup_steps = max_step // 20\n print(f'max_step: {max_step}')\n print(f'warmup_steps: {warmup_steps}')\n if self.hparams.lr_scheduler == 'cosine':\n self.scheduler = LinearWarmupCosineLRScheduler(optimizer,\n max_step=max_step,\n min_lr=self.hparams.lr_decay_min_lr,\n init_lr=self.hparams.lr,\n warmup_steps=warmup_steps,\n warmup_start_lr=self.hparams.lr_warmup_start_lr)\n else:\n self.scheduler = None\n raise ValueError('Invalid lr_scheduler type!')\n return optimizer\n\n def configure_loss(self, out, labels=None):\n loss = self.hparams.loss.lower()\n if loss == 'lm':\n return out.loss\n else:\n raise ValueError(\"Invalid Loss Type!\")\n \n def on_save_checkpoint(self, checkpoint):\n if self.hparams.save == 'part':\n checkpoint.pop('optimizer_states')\n to_be_removed = []\n for key, value in checkpoint['state_dict'].items():\n try:\n if not self.get_parameter(key).requires_grad:\n to_be_removed.append(key)\n except AttributeError:\n to_be_removed.append(key)\n for key in to_be_removed:\n checkpoint['state_dict'].pop(key)\n elif self.hparams.save == 'all':\n pass\n \n def load_llm(self, llm_path):\n print('Loading LLAMA')\n self.llama_tokenizer = LlamaTokenizer.from_pretrained(llm_path, use_fast=False)\n self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token\n self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.llama_tokenizer.padding_side = \"right\"\n self.llama_tokenizer.add_special_tokens({'additional_special_tokens': ['[PH]','[HistoryEmb]','[CansEmb]','[ItemEmb]']})\n self.llama_model = LlamaForCausalLM.from_pretrained(llm_path, torch_dtype=torch.bfloat16)\n self.llama_model.resize_token_embeddings(len(self.llama_tokenizer))\n if self.hparams.llm_tuning == 'lora':\n if self.hparams.peft_dir:\n self.llama_model = PeftModel.from_pretrained(self.llm_model, 
self.hparams.peft_dir, is_trainable=True)\n else:\n if self.hparams.peft_config:\n peft_config = LoraConfig(**LoraConfig.from_json_file(self.hparams.peft_config))\n else:\n peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM,\n inference_mode=False,\n r=self.hparams.lora_r,\n lora_alpha=self.hparams.lora_alpha,\n lora_dropout=self.hparams.lora_dropout,\n target_modules=['k_proj', 'v_proj', 'q_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'])\n self.peft_config = peft_config\n self.llama_model = get_peft_model(self.llama_model, peft_config)\n self.llama_model.print_trainable_parameters()\n elif self.hparams.llm_tuning == 'freeze':\n for name, param in self.llama_model.named_parameters():\n param.requires_grad = False\n elif self.hparams.llm_tuning == 'freeze_lora':\n if self.hparams.peft_dir:\n self.llama_model = PeftModel.from_pretrained(self.llm_model, self.hparams.peft_dir, is_trainable=True)\n else:\n if self.hparams.peft_config:\n peft_config = LoraConfig(**LoraConfig.from_json_file(self.hparams.peft_config))\n else:\n peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM,\n inference_mode=False,\n r=self.hparams.lora_r,\n lora_alpha=self.hparams.lora_alpha,\n lora_dropout=self.hparams.lora_dropout,\n target_modules=['k_proj', 'v_proj', 'q_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'])\n self.peft_config = peft_config\n self.llama_model = get_peft_model(self.llama_model, peft_config)\n for name, param in self.llama_model.named_parameters():\n param.requires_grad = False\n self.llama_model.print_trainable_parameters()\n else:\n raise NotImplementedError()\n \n print('Loading LLAMA Done')\n\n def load_projector(self):\n name = self.hparams.model_name\n camel_name = ''.join([i.capitalize() for i in name.split('_')])\n try:\n Model = getattr(importlib.import_module(\n '.'+name, package=__package__), camel_name)\n except:\n raise ValueError(\n f'Invalid Module File Name or Invalid Class Name {name}.{camel_name}!')\n self.projector = self.instancialize(Model, rec_size=self.hparams.rec_size, llm_size=self.llama_model.config.hidden_size)\n\n def instancialize(self, Model, **other_args):\n class_args = inspect.getargspec(Model.__init__).args[1:]\n inkeys = self.hparams.keys()\n args1 = {}\n for arg in class_args:\n if arg in inkeys:\n args1[arg] = getattr(self.hparams, arg)\n args1.update(other_args)\n return Model(**args1)\n\n def load_rec_model(self, rec_model_path):\n print('Loading Rec Model')\n self.rec_model = torch.load(rec_model_path, map_location=\"cpu\")\n self.rec_model.eval()\n for name, param in self.rec_model.named_parameters():\n param.requires_grad = False\n print('Loding Rec model Done')\n\n def encode_items(self, seq):\n if self.hparams.rec_embed==\"SASRec\":\n item_rec_embs=self.rec_model.cacu_x(seq)\n elif self.hparams.rec_embed in ['Caser','GRU']:\n item_rec_embs=self.rec_model.item_embeddings(seq)\n item_txt_embs=self.projector(item_rec_embs)\n return item_txt_embs\n \n def embed_tokens(self, token_ids):\n embeds = self.llama_model.base_model.embed_tokens(token_ids)\n return embeds\n\n def wrap_emb(self, batch):\n input_embeds = self.llama_model.get_input_embeddings()(batch[\"tokens\"].input_ids)\n \n his_token_id=self.llama_tokenizer(\"[HistoryEmb]\", return_tensors=\"pt\",add_special_tokens=False).input_ids.item()\n cans_token_id=self.llama_tokenizer(\"[CansEmb]\", return_tensors=\"pt\",add_special_tokens=False).input_ids.item()\n item_token_id=self.llama_tokenizer(\"[ItemEmb]\", return_tensors=\"pt\",add_special_tokens=False).input_ids.item()\n 
his_item_embeds= self.encode_items(batch[\"seq\"])\n cans_item_embeds= self.encode_items(batch[\"cans\"])\n item_embeds=self.encode_items(batch[\"item_id\"])\n \n for i in range(len(batch[\"len_seq\"])):\n if (batch[\"tokens\"].input_ids[i]==his_token_id).nonzero().shape[0]>0:\n idx_tensor=(batch[\"tokens\"].input_ids[i]==his_token_id).nonzero().view(-1)\n for idx, item_emb in zip(idx_tensor,his_item_embeds[i,:batch[\"len_seq\"][i].item()]):\n input_embeds[i,idx]=item_emb\n if (batch[\"tokens\"].input_ids[i]==cans_token_id).nonzero().shape[0]>0:\n idx_tensor=(batch[\"tokens\"].input_ids[i]==cans_token_id).nonzero().view(-1)\n for idx, item_emb in zip(idx_tensor,cans_item_embeds[i,:batch[\"len_cans\"][i].item()]):\n input_embeds[i,idx]=item_emb\n if (batch[\"tokens\"].input_ids[i]==item_token_id).nonzero().shape[0]>0:\n idx=(batch[\"tokens\"].input_ids[i]==item_token_id).nonzero().item()\n input_embeds[i,idx]=item_embeds[i]\n return input_embeds\n \n def calculate_hr1(self,eval_content):\n correct_num=0\n valid_num=0\n total_num=0\n for i,generate in enumerate(eval_content[\"generate\"]):\n real=eval_content[\"real\"][i]\n cans=eval_content[\"cans\"][i]\n total_num+=1\n generate=generate.strip().lower().strip()\n real=real.strip().lower().strip()\n cans=[item.strip().lower().strip() for item in cans]\n gen_cans_list=[]\n for cans_item in cans:\n if cans_item in generate:\n gen_cans_list.append(cans_item)\n if len(gen_cans_list)==1:\n valid_num+=1\n if real == gen_cans_list[0]:\n correct_num+=1\n valid_ratio=valid_num/total_num\n if valid_num>0:\n hr1=correct_num/valid_num\n else:\n hr1=0\n return valid_ratio,hr1"
},
{
"identifier": "DInterface",
"path": "data/data_interface.py",
"snippet": "class DInterface(pl.LightningDataModule):\n\n def __init__(self, \n llm_tokenizer=None,\n num_workers=8,\n dataset='',\n **kwargs):\n super().__init__()\n self.num_workers = num_workers\n self.llm_tokenizer=llm_tokenizer\n self.dataset = dataset\n self.kwargs = kwargs\n self.batch_size = kwargs['batch_size']\n self.max_epochs = kwargs['max_epochs']\n self.load_data_module()\n self.load_prompt(kwargs['prompt_path'])\n\n self.trainset = self.instancialize(stage='train')\n self.valset = self.instancialize(stage='val')\n self.testset = self.instancialize(stage='test')\n self.max_steps = self.max_epochs*(len(self.trainset)//self.batch_size)//self.num_workers\n\n def train_dataloader(self):\n return DataLoader(self.trainset,\n batch_size=self.batch_size, \n num_workers=self.num_workers, \n shuffle=True,\n drop_last=True,\n collate_fn=TrainCollater(prompt_list=self.prompt_list,llm_tokenizer=self.llm_tokenizer,train=True, max_step=self.max_steps))\n\n def val_dataloader(self):\n return DataLoader(self.valset, \n batch_size=self.batch_size, \n num_workers=self.num_workers, \n shuffle=False,\n collate_fn=TrainCollater(prompt_list=self.prompt_list,llm_tokenizer=self.llm_tokenizer,train=False))\n\n def test_dataloader(self):\n return DataLoader(self.testset, \n batch_size=self.batch_size, \n num_workers=self.num_workers, \n shuffle=False,\n collate_fn=TrainCollater(prompt_list=self.prompt_list,llm_tokenizer=self.llm_tokenizer,train=False))\n\n def load_data_module(self):\n name = self.dataset\n camel_name = ''.join([i.capitalize() for i in name.split('_')])\n try:\n self.data_module = getattr(importlib.import_module(\n '.'+name, package=__package__), camel_name)\n except:\n raise ValueError(\n f'Invalid Dataset File Name or Invalid Class Name data.{name}.{camel_name}')\n\n def instancialize(self, **other_args):\n \"\"\" Instancialize a model using the corresponding parameters\n from self.hparams dictionary. You can also input any args\n to overwrite the corresponding value in self.kwargs.\n \"\"\"\n class_args = inspect.getargspec(self.data_module.__init__).args[1:]\n inkeys = self.kwargs.keys()\n args1 = {}\n for arg in class_args:\n if arg in inkeys:\n args1[arg] = self.kwargs[arg]\n args1.update(other_args)\n return self.data_module(**args1)\n \n def load_prompt(self,prompt_path):\n if os.path.isfile(prompt_path):\n with open(prompt_path, 'r') as f:\n raw_prompts = f.read().splitlines()\n self.prompt_list = [p.strip() for p in raw_prompts]\n print('Load {} training prompts'.format(len(self.prompt_list)))\n print('Prompt Example \\n{}'.format(random.choice(self.prompt_list)))\n else:\n self.prompt_list = []"
},
{
"identifier": "SASRec",
"path": "recommender/A_SASRec_final_bce_llm.py",
"snippet": "class SASRec(nn.Module):\n def __init__(self, hidden_size, item_num, state_size, dropout, device, num_heads=1):\n super().__init__()\n self.state_size = state_size\n self.hidden_size = hidden_size\n self.item_num = int(item_num)\n self.dropout = nn.Dropout(dropout)\n self.device = device\n self.item_embeddings = nn.Embedding(\n num_embeddings=item_num + 1,\n embedding_dim=hidden_size,\n )\n nn.init.normal_(self.item_embeddings.weight, 0, 1)\n self.positional_embeddings = nn.Embedding(\n num_embeddings=state_size,\n embedding_dim=hidden_size\n )\n self.emb_dropout = nn.Dropout(dropout)\n self.ln_1 = nn.LayerNorm(hidden_size)\n self.ln_2 = nn.LayerNorm(hidden_size)\n self.ln_3 = nn.LayerNorm(hidden_size)\n self.mh_attn = MultiHeadAttention(hidden_size, hidden_size, num_heads, dropout)\n self.feed_forward = PositionwiseFeedForward(hidden_size, hidden_size, dropout)\n self.s_fc = nn.Linear(hidden_size, item_num)\n\n def forward(self, states, len_states):\n inputs_emb = self.item_embeddings(states)\n inputs_emb += self.positional_embeddings(torch.arange(self.state_size).to(self.device))\n seq = self.emb_dropout(inputs_emb)\n mask = torch.ne(states, self.item_num).float().unsqueeze(-1).to(self.device)\n seq *= mask\n seq_normalized = self.ln_1(seq)\n mh_attn_out = self.mh_attn(seq_normalized, seq)\n ff_out = self.feed_forward(self.ln_2(mh_attn_out))\n ff_out *= mask\n ff_out = self.ln_3(ff_out)\n state_hidden = extract_axis_1(ff_out, len_states - 1)\n supervised_output = self.s_fc(state_hidden).squeeze()\n return supervised_output\n\n def forward_eval(self, states, len_states):\n inputs_emb = self.item_embeddings(states)\n inputs_emb += self.positional_embeddings(torch.arange(self.state_size).to(self.device))\n seq = self.emb_dropout(inputs_emb)\n mask = torch.ne(states, self.item_num).float().unsqueeze(-1).to(self.device)\n seq *= mask\n seq_normalized = self.ln_1(seq)\n mh_attn_out = self.mh_attn(seq_normalized, seq)\n ff_out = self.feed_forward(self.ln_2(mh_attn_out))\n ff_out *= mask\n ff_out = self.ln_3(ff_out)\n state_hidden = extract_axis_1(ff_out, len_states - 1)\n supervised_output = self.s_fc(state_hidden).squeeze()\n return supervised_output\n \n def cacul_h(self, states, len_states):\n inputs_emb = self.item_embeddings(states)\n inputs_emb += self.positional_embeddings(torch.arange(self.state_size).to(self.device))\n seq = self.emb_dropout(inputs_emb)\n mask = torch.ne(states, self.item_num).float().unsqueeze(-1).to(self.device)\n seq *= mask\n seq_normalized = self.ln_1(seq)\n mh_attn_out = self.mh_attn(seq_normalized, seq)\n ff_out = self.feed_forward(self.ln_2(mh_attn_out))\n ff_out *= mask\n ff_out = self.ln_3(ff_out)\n state_hidden = extract_axis_1(ff_out, len_states - 1)\n\n return state_hidden\n \n def cacu_x(self, x):\n x = self.item_embeddings(x)\n\n return x"
},
{
"identifier": "Caser",
"path": "recommender/A_SASRec_final_bce_llm.py",
"snippet": "class Caser(nn.Module):\n def __init__(self, hidden_size, item_num, state_size, num_filters, filter_sizes,\n dropout_rate):\n super(Caser, self).__init__()\n self.hidden_size = hidden_size\n self.item_num = int(item_num)\n self.state_size = state_size\n self.filter_sizes = eval(filter_sizes)\n self.num_filters = num_filters\n self.dropout_rate = dropout_rate\n self.item_embeddings = nn.Embedding(\n num_embeddings=item_num + 1,\n embedding_dim=self.hidden_size,\n )\n\n # init embedding\n nn.init.normal_(self.item_embeddings.weight, 0, 0.01)\n\n # Horizontal Convolutional Layers\n self.horizontal_cnn = nn.ModuleList(\n [nn.Conv2d(1, self.num_filters, (i, self.hidden_size)) for i in self.filter_sizes])\n # Initialize weights and biases\n for cnn in self.horizontal_cnn:\n nn.init.xavier_normal_(cnn.weight)\n nn.init.constant_(cnn.bias, 0.1)\n\n # Vertical Convolutional Layer\n self.vertical_cnn = nn.Conv2d(1, 1, (self.state_size, 1))\n nn.init.xavier_normal_(self.vertical_cnn.weight)\n nn.init.constant_(self.vertical_cnn.bias, 0.1)\n\n # Fully Connected Layer\n self.num_filters_total = self.num_filters * len(self.filter_sizes)\n final_dim = self.hidden_size + self.num_filters_total\n self.s_fc = nn.Linear(final_dim, item_num)\n\n # dropout\n self.dropout = nn.Dropout(self.dropout_rate)\n\n def forward(self, states, len_states):\n input_emb = self.item_embeddings(states)\n mask = torch.ne(states, self.item_num).float().unsqueeze(-1)\n input_emb *= mask\n input_emb = input_emb.unsqueeze(1)\n pooled_outputs = []\n for cnn in self.horizontal_cnn:\n h_out = nn.functional.relu(cnn(input_emb))\n h_out = h_out.squeeze()\n p_out = nn.functional.max_pool1d(h_out, h_out.shape[2])\n pooled_outputs.append(p_out)\n\n h_pool = torch.cat(pooled_outputs, 1)\n h_pool_flat = h_pool.view(-1, self.num_filters_total)\n\n v_out = nn.functional.relu(self.vertical_cnn(input_emb))\n v_flat = v_out.view(-1, self.hidden_size)\n\n out = torch.cat([h_pool_flat, v_flat], 1)\n out = self.dropout(out)\n supervised_output = self.s_fc(out)\n\n return supervised_output\n\n def forward_eval(self, states, len_states):\n input_emb = self.item_embeddings(states)\n mask = torch.ne(states, self.item_num).float().unsqueeze(-1)\n input_emb *= mask\n input_emb = input_emb.unsqueeze(1)\n pooled_outputs = []\n for cnn in self.horizontal_cnn:\n h_out = nn.functional.relu(cnn(input_emb))\n h_out = h_out.squeeze()\n p_out = nn.functional.max_pool1d(h_out, h_out.shape[2])\n pooled_outputs.append(p_out)\n\n h_pool = torch.cat(pooled_outputs, 1)\n h_pool_flat = h_pool.view(-1, self.num_filters_total)\n\n v_out = nn.functional.relu(self.vertical_cnn(input_emb))\n v_flat = v_out.view(-1, self.hidden_size)\n\n out = torch.cat([h_pool_flat, v_flat], 1)\n out = self.dropout(out)\n supervised_output = self.s_fc(out)\n \n return supervised_output"
},
{
"identifier": "GRU",
"path": "recommender/A_SASRec_final_bce_llm.py",
"snippet": "class GRU(nn.Module):\n def __init__(self, hidden_size, item_num, state_size, gru_layers=1):\n super(GRU, self).__init__()\n self.hidden_size = hidden_size\n self.item_num = item_num\n self.state_size = state_size\n self.item_embeddings = nn.Embedding(\n num_embeddings=item_num + 1,\n embedding_dim=self.hidden_size,\n )\n nn.init.normal_(self.item_embeddings.weight, 0, 0.01)\n self.gru = nn.GRU(\n input_size=self.hidden_size,\n hidden_size=self.hidden_size,\n num_layers=gru_layers,\n batch_first=True\n )\n self.s_fc = nn.Linear(self.hidden_size, self.item_num)\n\n def forward(self, states, len_states):\n # Supervised Head\n emb = self.item_embeddings(states)\n emb_packed = torch.nn.utils.rnn.pack_padded_sequence(emb, len_states, batch_first=True, enforce_sorted=False)\n emb_packed, hidden = self.gru(emb_packed)\n hidden = hidden.view(-1, hidden.shape[2])\n supervised_output = self.s_fc(hidden)\n return supervised_output\n\n def forward_eval(self, states, len_states):\n # Supervised Head\n emb = self.item_embeddings(states)\n emb_packed = torch.nn.utils.rnn.pack_padded_sequence(emb, len_states, batch_first=True, enforce_sorted=False)\n emb_packed, hidden = self.gru(emb_packed)\n hidden = hidden.view(-1, hidden.shape[2])\n supervised_output = self.s_fc(hidden)\n\n return supervised_output"
}
] | import os
import torch
import pytorch_lightning as pl
import pytorch_lightning.callbacks as plc
from argparse import ArgumentParser
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger
from model.model_interface import MInterface
from data.data_interface import DInterface
from recommender.A_SASRec_final_bce_llm import SASRec, Caser, GRU
from SASRecModules_ori import *
from transformers import LlamaForCausalLM, LlamaTokenizer | 6,825 |
def load_callbacks(args):
callbacks = []
callbacks.append(plc.EarlyStopping(
monitor='metric',
mode='max',
patience=10,
min_delta=0.001
))
callbacks.append(plc.ModelCheckpoint(
monitor='metric',
dirpath=args.ckpt_dir,
filename='{epoch:02d}-{metric:.3f}',
save_top_k=-1,
mode='max',
save_last=True,
#train_time_interval=args.val_check_interval
every_n_epochs=1
))
if args.lr_scheduler:
callbacks.append(plc.LearningRateMonitor(
logging_interval='step'))
return callbacks
def main(args):
pl.seed_everything(args.seed)
model = MInterface(**vars(args))
if args.ckpt_path:
ckpt = torch.load(args.ckpt_path, map_location='cpu')
model.load_state_dict(ckpt['state_dict'], strict=False)
print("load checkpoints from {}".format(args.ckpt_path))
|
def load_callbacks(args):
callbacks = []
callbacks.append(plc.EarlyStopping(
monitor='metric',
mode='max',
patience=10,
min_delta=0.001
))
callbacks.append(plc.ModelCheckpoint(
monitor='metric',
dirpath=args.ckpt_dir,
filename='{epoch:02d}-{metric:.3f}',
save_top_k=-1,
mode='max',
save_last=True,
#train_time_interval=args.val_check_interval
every_n_epochs=1
))
if args.lr_scheduler:
callbacks.append(plc.LearningRateMonitor(
logging_interval='step'))
return callbacks
def main(args):
pl.seed_everything(args.seed)
model = MInterface(**vars(args))
if args.ckpt_path:
ckpt = torch.load(args.ckpt_path, map_location='cpu')
model.load_state_dict(ckpt['state_dict'], strict=False)
print("load checkpoints from {}".format(args.ckpt_path))
| data_module = DInterface(llm_tokenizer=model.llama_tokenizer,**vars(args)) | 1 | 2023-11-09 12:19:17+00:00 | 8k |
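A minimal sketch of how the pieces in this record would typically be wired into a PyTorch Lightning Trainer (illustrative only, not part of the record above; the logger choice and the max_epochs/ckpt_dir fields on args are assumptions, not taken from this file):

def run_training(args):
    # Assumes args carries the same fields used in main()/load_callbacks() plus max_epochs.
    pl.seed_everything(args.seed)
    model = MInterface(**vars(args))
    data_module = DInterface(llm_tokenizer=model.llama_tokenizer, **vars(args))
    logger = CSVLogger(save_dir=args.ckpt_dir)        # assumed logger choice
    trainer = Trainer(max_epochs=args.max_epochs,     # assumes args.max_epochs exists
                      callbacks=load_callbacks(args),
                      logger=logger)
    trainer.fit(model, datamodule=data_module)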
silicx/GoldFromOres | DatasetCondensation/utils.py | [
{
"identifier": "MLP",
"path": "DatasetCondensation/networks.py",
"snippet": "class MLP(nn.Module):\r\n def __init__(self, channel, num_classes):\r\n super(MLP, self).__init__()\r\n self.fc_1 = nn.Linear(28*28*1 if channel==1 else 32*32*3, 128)\r\n self.fc_2 = nn.Linear(128, 128)\r\n self.fc_3 = nn.Linear(128, num_classes)\r\n\r\n def forward(self, x):\r\n out = x.view(x.size(0), -1)\r\n out = F.relu(self.fc_1(out))\r\n out = F.relu(self.fc_2(out))\r\n out = self.fc_3(out)\r\n return out\r"
},
{
"identifier": "ConvNet",
"path": "DatasetCondensation/networks.py",
"snippet": "class ConvNet(nn.Module):\r\n def __init__(self, channel, num_classes, net_width, net_depth, net_act, net_norm, net_pooling, im_size = (32,32)):\r\n super(ConvNet, self).__init__()\r\n\r\n self.features, shape_feat = self._make_layers(channel, net_width, net_depth, net_norm, net_act, net_pooling, im_size)\r\n num_feat = shape_feat[0]*shape_feat[1]*shape_feat[2]\r\n self.classifier = nn.Linear(num_feat, num_classes)\r\n\r\n def forward(self, x):\r\n out = self.features(x)\r\n out = out.view(out.size(0), -1)\r\n out = self.classifier(out)\r\n return out\r\n\r\n def embed(self, x):\r\n out = self.features(x)\r\n out = out.view(out.size(0), -1)\r\n return out\r\n\r\n def _get_activation(self, net_act):\r\n if net_act == 'sigmoid':\r\n return nn.Sigmoid()\r\n elif net_act == 'relu':\r\n return nn.ReLU(inplace=True)\r\n elif net_act == 'leakyrelu':\r\n return nn.LeakyReLU(negative_slope=0.01)\r\n elif net_act == 'swish':\r\n return Swish()\r\n else:\r\n exit('unknown activation function: %s'%net_act)\r\n\r\n def _get_pooling(self, net_pooling):\r\n if net_pooling == 'maxpooling':\r\n return nn.MaxPool2d(kernel_size=2, stride=2)\r\n elif net_pooling == 'avgpooling':\r\n return nn.AvgPool2d(kernel_size=2, stride=2)\r\n elif net_pooling == 'none':\r\n return None\r\n else:\r\n exit('unknown net_pooling: %s'%net_pooling)\r\n\r\n def _get_normlayer(self, net_norm, shape_feat):\r\n # shape_feat = (c*h*w)\r\n if net_norm == 'batchnorm':\r\n return nn.BatchNorm2d(shape_feat[0], affine=True)\r\n elif net_norm == 'layernorm':\r\n return nn.LayerNorm(shape_feat, elementwise_affine=True)\r\n elif net_norm == 'instancenorm':\r\n return nn.GroupNorm(shape_feat[0], shape_feat[0], affine=True)\r\n elif net_norm == 'groupnorm':\r\n return nn.GroupNorm(4, shape_feat[0], affine=True)\r\n elif net_norm == 'none':\r\n return None\r\n else:\r\n exit('unknown net_norm: %s'%net_norm)\r\n\r\n def _make_layers(self, channel, net_width, net_depth, net_norm, net_act, net_pooling, im_size):\r\n layers = []\r\n in_channels = channel\r\n if im_size[0] == 28:\r\n im_size = (32, 32)\r\n shape_feat = [in_channels, im_size[0], im_size[1]]\r\n for d in range(net_depth):\r\n layers += [nn.Conv2d(in_channels, net_width, kernel_size=3, padding=3 if channel == 1 and d == 0 else 1)]\r\n shape_feat[0] = net_width\r\n if net_norm != 'none':\r\n layers += [self._get_normlayer(net_norm, shape_feat)]\r\n layers += [self._get_activation(net_act)]\r\n in_channels = net_width\r\n if net_pooling != 'none':\r\n layers += [self._get_pooling(net_pooling)]\r\n shape_feat[1] //= 2\r\n shape_feat[2] //= 2\r\n\r\n return nn.Sequential(*layers), shape_feat\r"
},
{
"identifier": "LeNet",
"path": "DatasetCondensation/networks.py",
"snippet": "class LeNet(nn.Module):\r\n def __init__(self, channel, num_classes):\r\n super(LeNet, self).__init__()\r\n self.features = nn.Sequential(\r\n nn.Conv2d(channel, 6, kernel_size=5, padding=2 if channel==1 else 0),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n nn.Conv2d(6, 16, kernel_size=5),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n )\r\n self.fc_1 = nn.Linear(16 * 5 * 5, 120)\r\n self.fc_2 = nn.Linear(120, 84)\r\n self.fc_3 = nn.Linear(84, num_classes)\r\n\r\n def forward(self, x):\r\n x = self.features(x)\r\n x = x.view(x.size(0), -1)\r\n x = F.relu(self.fc_1(x))\r\n x = F.relu(self.fc_2(x))\r\n x = self.fc_3(x)\r\n return x\r"
},
{
"identifier": "AlexNet",
"path": "DatasetCondensation/networks.py",
"snippet": "class AlexNet(nn.Module):\r\n def __init__(self, channel, num_classes):\r\n super(AlexNet, self).__init__()\r\n self.features = nn.Sequential(\r\n nn.Conv2d(channel, 128, kernel_size=5, stride=1, padding=4 if channel==1 else 2),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n nn.Conv2d(128, 192, kernel_size=5, padding=2),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n nn.Conv2d(192, 256, kernel_size=3, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(256, 192, kernel_size=3, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(192, 192, kernel_size=3, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n )\r\n self.fc = nn.Linear(192 * 4 * 4, num_classes)\r\n\r\n def forward(self, x):\r\n x = self.features(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n return x\r\n\r\n def embed(self, x):\r\n x = self.features(x)\r\n x = x.view(x.size(0), -1)\r\n return x\r"
},
{
"identifier": "AlexNetBN",
"path": "DatasetCondensation/networks.py",
"snippet": "class AlexNetBN(nn.Module):\r\n def __init__(self, channel, num_classes):\r\n super(AlexNetBN, self).__init__()\r\n self.features = nn.Sequential(\r\n nn.Conv2d(channel, 128, kernel_size=5, stride=1, padding=4 if channel==1 else 2),\r\n nn.BatchNorm2d(128),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n nn.Conv2d(128, 192, kernel_size=5, padding=2),\r\n nn.BatchNorm2d(192),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n nn.Conv2d(192, 256, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(256),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(256, 192, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(192),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(192, 192, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(192),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n )\r\n self.fc = nn.Linear(192 * 4 * 4, num_classes)\r\n\r\n def forward(self, x):\r\n x = self.features(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n return x\r\n\r\n def embed(self, x):\r\n x = self.features(x)\r\n x = x.view(x.size(0), -1)\r\n return x\r"
},
{
"identifier": "VGG11",
"path": "DatasetCondensation/networks.py",
"snippet": "def VGG11(channel, num_classes):\r\n return VGG('VGG11', channel, num_classes)\r"
},
{
"identifier": "VGG11BN",
"path": "DatasetCondensation/networks.py",
"snippet": "def VGG11BN(channel, num_classes):\r\n return VGG('VGG11', channel, num_classes, norm='batchnorm')\r"
},
{
"identifier": "ResNet18",
"path": "DatasetCondensation/networks.py",
"snippet": "def ResNet18(channel, num_classes):\r\n return ResNet(BasicBlock, [2,2,2,2], channel=channel, num_classes=num_classes)\r"
},
{
"identifier": "ResNet18BN_AP",
"path": "DatasetCondensation/networks.py",
"snippet": "def ResNet18BN_AP(channel, num_classes):\r\n return ResNet_AP(BasicBlock_AP, [2,2,2,2], channel=channel, num_classes=num_classes, norm='batchnorm')\r"
},
{
"identifier": "ResNet18BN",
"path": "DatasetCondensation/networks.py",
"snippet": "def ResNet18BN(channel, num_classes):\r\n return ResNet(BasicBlock, [2,2,2,2], channel=channel, num_classes=num_classes, norm='batchnorm')\r"
}
] | import time
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torchvision import datasets, transforms
from scipy.ndimage.interpolation import rotate as scipyrotate
from .networks import MLP, ConvNet, LeNet, AlexNet, AlexNetBN, VGG11, VGG11BN, ResNet18, ResNet18BN_AP, ResNet18BN
| 3,642 |
def get_dataset(dataset, data_path):
if dataset == 'MNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.1307]
std = [0.3081]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.MNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'FashionMNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.2861]
std = [0.3530]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.FashionMNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.FashionMNIST(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'SVHN':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4377, 0.4438, 0.4728]
std = [0.1980, 0.2010, 0.1970]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.SVHN(data_path, split='train', download=True, transform=transform) # no augmentation
dst_test = datasets.SVHN(data_path, split='test', download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'CIFAR10':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'CIFAR100':
channel = 3
im_size = (32, 32)
num_classes = 100
mean = [0.5071, 0.4866, 0.4409]
std = [0.2673, 0.2564, 0.2762]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'TinyImageNet':
channel = 3
im_size = (64, 64)
num_classes = 200
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
data = torch.load(os.path.join(data_path, 'tinyimagenet.pt'), map_location='cpu')
class_names = data['classes']
images_train = data['images_train']
labels_train = data['labels_train']
images_train = images_train.detach().float() / 255.0
labels_train = labels_train.detach()
for c in range(channel):
images_train[:,c] = (images_train[:,c] - mean[c])/std[c]
dst_train = TensorDataset(images_train, labels_train) # no augmentation
images_val = data['images_val']
labels_val = data['labels_val']
images_val = images_val.detach().float() / 255.0
labels_val = labels_val.detach()
for c in range(channel):
images_val[:, c] = (images_val[:, c] - mean[c]) / std[c]
dst_test = TensorDataset(images_val, labels_val) # no augmentation
else:
exit('unknown dataset: %s'%dataset)
testloader = torch.utils.data.DataLoader(dst_test, batch_size=256, shuffle=False, num_workers=0)
return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader
class TensorDataset(Dataset):
def __init__(self, images, labels): # images: n x c x h x w tensor
self.images = images.detach().float()
self.labels = labels.detach()
def __getitem__(self, index):
return self.images[index], self.labels[index]
def __len__(self):
return self.images.shape[0]
def get_default_convnet_setting():
net_width, net_depth, net_act, net_norm, net_pooling = 128, 3, 'relu', 'instancenorm', 'avgpooling'
return net_width, net_depth, net_act, net_norm, net_pooling
def get_network(model, channel, num_classes, im_size=(32, 32)):
torch.random.manual_seed(int(time.time() * 1000) % 100000)
net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()
if model == 'MLP':
|
def get_dataset(dataset, data_path):
if dataset == 'MNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.1307]
std = [0.3081]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.MNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'FashionMNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.2861]
std = [0.3530]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.FashionMNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.FashionMNIST(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'SVHN':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4377, 0.4438, 0.4728]
std = [0.1980, 0.2010, 0.1970]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.SVHN(data_path, split='train', download=True, transform=transform) # no augmentation
dst_test = datasets.SVHN(data_path, split='test', download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'CIFAR10':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'CIFAR100':
channel = 3
im_size = (32, 32)
num_classes = 100
mean = [0.5071, 0.4866, 0.4409]
std = [0.2673, 0.2564, 0.2762]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'TinyImageNet':
channel = 3
im_size = (64, 64)
num_classes = 200
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
data = torch.load(os.path.join(data_path, 'tinyimagenet.pt'), map_location='cpu')
class_names = data['classes']
images_train = data['images_train']
labels_train = data['labels_train']
images_train = images_train.detach().float() / 255.0
labels_train = labels_train.detach()
for c in range(channel):
images_train[:,c] = (images_train[:,c] - mean[c])/std[c]
dst_train = TensorDataset(images_train, labels_train) # no augmentation
images_val = data['images_val']
labels_val = data['labels_val']
images_val = images_val.detach().float() / 255.0
labels_val = labels_val.detach()
for c in range(channel):
images_val[:, c] = (images_val[:, c] - mean[c]) / std[c]
dst_test = TensorDataset(images_val, labels_val) # no augmentation
else:
exit('unknown dataset: %s'%dataset)
testloader = torch.utils.data.DataLoader(dst_test, batch_size=256, shuffle=False, num_workers=0)
return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader
class TensorDataset(Dataset):
def __init__(self, images, labels): # images: n x c x h x w tensor
self.images = images.detach().float()
self.labels = labels.detach()
def __getitem__(self, index):
return self.images[index], self.labels[index]
def __len__(self):
return self.images.shape[0]
def get_default_convnet_setting():
net_width, net_depth, net_act, net_norm, net_pooling = 128, 3, 'relu', 'instancenorm', 'avgpooling'
return net_width, net_depth, net_act, net_norm, net_pooling
def get_network(model, channel, num_classes, im_size=(32, 32)):
torch.random.manual_seed(int(time.time() * 1000) % 100000)
net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()
if model == 'MLP':
| net = MLP(channel=channel, num_classes=num_classes)
| 0 | 2023-11-03 09:34:15+00:00 | 8k |
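A quick illustration of how get_dataset and get_network above fit together (illustrative only, not part of the record above; it assumes the remainder of get_network, cut off in the excerpt, returns the constructed net, and that './data' is a placeholder download directory):

channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader = \
    get_dataset('MNIST', './data')
net = get_network('MLP', channel, num_classes, im_size)
images, labels = next(iter(testloader))   # first test batch: [256, 1, 28, 28]
print(net(images).shape)                  # torch.Size([256, 10])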
WHU-USI3DV/PatchAugNet | utils/model_util/feat_processor.py | [
{
"identifier": "TransformerEncoderLayer",
"path": "utils/model_util/transformer.py",
"snippet": "class TransformerEncoderLayer(nn.Module):\n\n def __init__(self,\n d_model, \n nhead, \n dim_feedforward, \n mha_dropout, \n ffn_dropout, \n activation, \n normalize_before,\n attn_mode='full', # linear or full attention\n seq_len=None, # sequence length for linear attention, i.e. num of input tokens\n proj_k=128, # the projected dimension 'k' in Linformer paper. Default: 128\n param_sharing=None # parameter sharing mode: layerwise, none.headwise is not implemented.Default: none.\n ):\n super().__init__()\n\n if attn_mode == 'full':\n self.mha = nn.MultiheadAttention(d_model, nhead, dropout=mha_dropout)\n else:\n self.mha = LinearMultiheadAttention(d_model, nhead, dropout=mha_dropout,\n seq_len=seq_len, proj_k=proj_k,\n param_sharing=param_sharing)\n\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(ffn_dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(ffn_dropout)\n self.dropout2 = nn.Dropout(ffn_dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def forward_post(self, src, tgt):\n # if self-attention the q,k,v is the same, either all src or all target\n q, k, v = src, tgt, tgt\n\n # MHA\n src2 = self.mha(query=q, key=k, value=v)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n\n # FFN\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def forward_pre(self, src, tgt):\n src2 = self.norm1(src)\n q, k, v = src2, src2, src2\n\n # MHA \n src2 = self.mha(query=q, key=k, value=v)[0]\n src = src + self.dropout1(src2)\n\n # FFN\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src\n\n def forward(self, src, tgt):\n\n if self.normalize_before:\n return self.forward_pre(src, tgt)\n else:\n return self.forward_post(src, tgt)"
},
{
"identifier": "NetVLADBase",
"path": "place_recognition/patch_aug_net/models/loupe.py",
"snippet": "class NetVLADBase(nn.Module):\n def __init__(self, feature_size, max_samples, cluster_size, output_dim,\n gating=True, add_batch_norm=True):\n super(NetVLADBase, self).__init__()\n self.feature_size = feature_size\n self.max_samples = max_samples\n self.output_dim = output_dim\n self.gating = gating\n self.add_batch_norm = add_batch_norm\n self.cluster_size = cluster_size # K\n self.softmax = nn.Softmax(dim=-1)\n\n self.cluster_weights = nn.Parameter(\n torch.randn(feature_size, cluster_size) * 1 / math.sqrt(feature_size))\n self.cluster_weights2 = nn.Parameter(\n torch.randn(1, feature_size, cluster_size) * 1 / math.sqrt(feature_size))\n self.hidden1_weights = nn.Parameter(\n torch.randn(feature_size * cluster_size, output_dim) * 1 / math.sqrt(feature_size))\n\n if add_batch_norm:\n self.cluster_biases = None\n self.bn1 = nn.BatchNorm1d(cluster_size)\n else:\n self.cluster_biases = nn.Parameter(\n torch.randn(cluster_size) * 1 / math.sqrt(feature_size)) # attention initialization\n self.bn1 = None\n\n self.bn2 = nn.BatchNorm1d(output_dim)\n\n if gating:\n self.context_gating = GatingContext(output_dim, add_batch_norm=add_batch_norm)\n\n def forward(self, x):\n x = x.transpose(1, 3).contiguous() # B x 1024 x N x 1 -> B x 1 x N x 1024\n x = x.view((-1, self.max_samples, self.feature_size)) # B x N x 1024\n\n activation = torch.matmul(x, self.cluster_weights) # B x N x 1024 X 1024 x 64 -> B x N x 64\n if self.add_batch_norm:\n # activation = activation.transpose(1,2).contiguous()\n activation = activation.view(-1, self.cluster_size) # B x N x 64 -> BN x 64\n activation = self.bn1(activation) # BN x 64\n activation = activation.view(-1, self.max_samples, self.cluster_size) # BN x 64 -> B x N x 64\n # activation = activation.transpose(1,2).contiguous()\n else:\n activation = activation + self.cluster_biases # B x N x 64 + 64 -> B x N x 64\n\n activation = self.softmax(activation) # B x N x 64 --(dim=-1)--> B x N x 64\n\n # activation = activation[:,:,:64]\n activation = activation.view((-1, self.max_samples, self.cluster_size)) # B x N x 64\n\n a_sum = activation.sum(-2, keepdim=True) # B x N x K --(dim=-2)--> B x 1 x K\n a = a_sum * self.cluster_weights2 # B x 1 x K X 1 x C x K -> B x C x K\n # element-wise multiply, broadcast mechanism\n\n activation = torch.transpose(activation, 2, 1) # B x N x 64 -> B x 64 x N\n\n x = x.view((-1, self.max_samples, self.feature_size)) # B x N x C -> B x N x C\n vlad = torch.matmul(activation, x) # B x K x N X B x N x C -> B x K x C\n vlad = torch.transpose(vlad, 2, 1) # B x K x C -> B x C x K\n vlad = vlad - a # B x C x K - B x C x K -> B x C x K\n\n vlad = F.normalize(vlad, dim=1, p=2).contiguous() # B x C x K -> B x C x K\n return vlad"
},
{
"identifier": "get_pool",
"path": "utils/model_util/pool.py",
"snippet": "def get_pool(pool_name):\n if pool_name == 'avg':\n return nn.AdaptiveAvgPool2d((1, 1))\n elif pool_name == 'max':\n return nn.AdaptiveMaxPool2d((1, 1))\n elif pool_name == 'gem':\n return GeMPooling(norm=3)\n else:\n raise AttributeError('not support pooling way')"
},
{
"identifier": "nn_dist",
"path": "utils/train_util.py",
"snippet": "def nn_dist(c):\n # c: m x 3, or b x m x 3; Return: m x m, or b x m x m\n if len(c.shape) == 2:\n c1 = torch.unsqueeze(c, dim=1)\n c2 = c[None, ...]\n elif len(c.shape) == 3:\n c1 = torch.unsqueeze(c, dim=2)\n c2 = c[:, None, ...]\n return torch.sum((c1 - c2)**2, dim=-1) ** 0.5"
},
{
"identifier": "nn_angle",
"path": "utils/train_util.py",
"snippet": "def nn_angle(c, k=3):\n # c: m x 3, or b x m x 3\n knn = knn_cuda.KNN(k=k+1, transpose_mode=True)\n if len(c.shape) == 2:\n c = c.unsqueeze(0) # 1 x m x 3\n # nearest k neighborhood\n _, index = knn(c, c) # b x m x (k+1)\n index = index[..., 1:] # b x m x k\n\n # cos_angle = []\n # for i in range(index.shape[0]):\n # c_i = c[i] # m x 3\n # c0 = c_i[:, None, :] # m x 1 x 3\n # c1 = c_i[None, ...] # 1 x m x 3\n # index_i = index[i] # m x k\n # c2 = c_i[index_i] # m x k x 3\n # c01 = c1 - c0 # m x m x 3\n # c02 = c2 - c0 # m x k x 3\n # c01 = c01.unsqueeze(0) # 1 x m x m x 3\n # c02 = c02.unsqueeze(0).transpose(0, 2).contiguous() # k x m x 1 x 3\n # angle_i = F.cosine_similarity(c01, c02, dim=-1).unsqueeze(0) # 1 x k x m x m\n # cos_angle.append(angle_i)\n # cos_angle = torch.cat(cos_angle, dim=0) # b x k x m x m\n\n c0 = c[..., None, :] # b x m x 1 x 3\n c1 = c[:, None, ...] # b x 1 x m x 3\n c2 = []\n for i in range(index.shape[0]):\n c2_i = c[i][index[i]] # m x k x 3\n c2.append(c2_i.unsqueeze(0)) # 1 x m x k x 3\n c2 = torch.cat(c2, dim=0) # b x m x k x 3\n c01 = c1 - c0 # b x m x m x 3\n c02 = c2 - c0 # b x m x k x 3\n c01 = c01.unsqueeze(1) # b x 1 x m x m x 3\n c02 = c02.unsqueeze(0).permute(1, 3, 2, 0, 4) # b x k x m x 1 x 3\n cos_angle = F.cosine_similarity(c01, c02, dim=-1) # b x k x m x m\n return cos_angle"
}
] | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from utils.model_util.transformer import TransformerEncoderLayer
from place_recognition.patch_aug_net.models.loupe import NetVLADBase
from utils.model_util.pool import get_pool
from utils.train_util import nn_dist, nn_angle | 4,097 | super().__init__()
assert d_model % num_heads == 0
self.dim = d_model // num_heads
self.num_heads = num_heads
self.merge = nn.Conv1d(d_model, d_model, kernel_size=1)
self.proj = nn.ModuleList([copy.deepcopy(self.merge) for _ in range(3)])
def forward(self, query, key, value):
batch_dim = query.size(0)
query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1)
for l, x in zip(self.proj, (query, key, value))]
x, prob = attention(query, key, value)
# self.prob.append(prob)
return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1))
class AttentionalPropagation(nn.Module):
def __init__(self, feature_dim: int, num_heads: int):
super().__init__()
self.attn = MultiHeadedAttention(num_heads, feature_dim)
self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim])
nn.init.constant_(self.mlp[-1].bias, 0.0)
def forward(self, x, source):
message = self.attn(x, source, source)
return self.mlp(torch.cat([x, message], dim=1))
class AttentionalGNN(nn.Module):
def __init__(self, feature_dim: int, layer_names: list):
super().__init__()
self.layers = nn.ModuleList([
AttentionalPropagation(feature_dim, 4)
for _ in range(len(layer_names))])
self.names = layer_names
self.only_self_attn = True
for name in layer_names:
if name == 'cross':
self.only_self_attn = False
break
def forward(self, desc0, desc1=None):
""" desc0: b x m x d, desc1: b x n x d """
# only self-attn
if self.only_self_attn or desc1 is None:
desc0 = desc0.permute(0, 2, 1) # b x d x m
for layer, name in zip(self.layers, self.names):
delta0 = layer(desc0, desc0)
desc0 = desc0 + delta0
desc0 = desc0.permute(0, 2, 1) # b x m x d
return desc0
# with cross-attn
desc0 = desc0.permute(0, 2, 1) # b x d x m
desc1 = desc1.permute(0, 2, 1) # b x d x n
for layer, name in zip(self.layers, self.names):
layer.attn.prob = []
if name == 'cross':
src0, src1 = desc1, desc0
else: # if name == 'self':
src0, src1 = desc0, desc1
delta0, delta1 = layer(desc0, src0), layer(desc1, src1)
desc0, desc1 = (desc0 + delta0), (desc1 + delta1)
desc0 = desc0.permute(0, 2, 1) # b x m x d
desc1 = desc1.permute(0, 2, 1) # b x n x d
return desc0, desc1
class AbsCoordEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, coord_dim, embed_dim):
super(AbsCoordEncoder, self).__init__()
self.fc = nn.Sequential(
nn.Linear(coord_dim, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
def forward(self, x):
return self.fc(x)
class DistanceEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, N, embed_dim, max_dist=None):
super(DistanceEncoder, self).__init__()
self.max_dist = max_dist
self.fc = nn.Sequential(
nn.Linear(N, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
def forward(self, x):
dist = nn_dist(x - torch.mean(x, dim=1, keepdim=True)).float() # B x N x N
if self.max_dist is not None:
max_dist_fill = torch.ones_like(dist) * self.max_dist
dist = torch.where(dist > self.max_dist, max_dist_fill, dist)
x = self.fc(dist / torch.max(dist)) # B x N x d
return x
class AngleEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, N, embed_dim, angle_k=None):
super(AngleEncoder, self).__init__()
self.angle_k = angle_k
self.fc = nn.Sequential(
nn.Linear(N, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
self.max_pool = nn.AdaptiveMaxPool1d(1)
def forward(self, x):
|
def MLP(channels: list, do_bn=True):
""" Multi-layer perceptron """
n = len(channels)
layers = []
for i in range(1, n):
layers.append(
nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True))
if i < (n-1):
if do_bn:
# layers.append(nn.BatchNorm1d(channels[i]))
layers.append(nn.InstanceNorm1d(channels[i]))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
def attention(query, key, value):
dim = query.shape[1]
scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5
prob = torch.nn.functional.softmax(scores, dim=-1)
return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob
class MultiHeadedAttention(nn.Module):
""" Multi-head attention to increase model expressivitiy """
def __init__(self, num_heads: int, d_model: int):
super().__init__()
assert d_model % num_heads == 0
self.dim = d_model // num_heads
self.num_heads = num_heads
self.merge = nn.Conv1d(d_model, d_model, kernel_size=1)
self.proj = nn.ModuleList([copy.deepcopy(self.merge) for _ in range(3)])
def forward(self, query, key, value):
batch_dim = query.size(0)
query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1)
for l, x in zip(self.proj, (query, key, value))]
x, prob = attention(query, key, value)
# self.prob.append(prob)
return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1))
class AttentionalPropagation(nn.Module):
def __init__(self, feature_dim: int, num_heads: int):
super().__init__()
self.attn = MultiHeadedAttention(num_heads, feature_dim)
self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim])
nn.init.constant_(self.mlp[-1].bias, 0.0)
def forward(self, x, source):
message = self.attn(x, source, source)
return self.mlp(torch.cat([x, message], dim=1))
class AttentionalGNN(nn.Module):
def __init__(self, feature_dim: int, layer_names: list):
super().__init__()
self.layers = nn.ModuleList([
AttentionalPropagation(feature_dim, 4)
for _ in range(len(layer_names))])
self.names = layer_names
self.only_self_attn = True
for name in layer_names:
if name == 'cross':
self.only_self_attn = False
break
def forward(self, desc0, desc1=None):
""" desc0: b x m x d, desc1: b x n x d """
# only self-attn
if self.only_self_attn or desc1 is None:
desc0 = desc0.permute(0, 2, 1) # b x d x m
for layer, name in zip(self.layers, self.names):
delta0 = layer(desc0, desc0)
desc0 = desc0 + delta0
desc0 = desc0.permute(0, 2, 1) # b x m x d
return desc0
# with cross-attn
desc0 = desc0.permute(0, 2, 1) # b x d x m
desc1 = desc1.permute(0, 2, 1) # b x d x n
for layer, name in zip(self.layers, self.names):
layer.attn.prob = []
if name == 'cross':
src0, src1 = desc1, desc0
else: # if name == 'self':
src0, src1 = desc0, desc1
delta0, delta1 = layer(desc0, src0), layer(desc1, src1)
desc0, desc1 = (desc0 + delta0), (desc1 + delta1)
desc0 = desc0.permute(0, 2, 1) # b x m x d
desc1 = desc1.permute(0, 2, 1) # b x n x d
return desc0, desc1
class AbsCoordEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, coord_dim, embed_dim):
super(AbsCoordEncoder, self).__init__()
self.fc = nn.Sequential(
nn.Linear(coord_dim, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
def forward(self, x):
return self.fc(x)
class DistanceEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, N, embed_dim, max_dist=None):
super(DistanceEncoder, self).__init__()
self.max_dist = max_dist
self.fc = nn.Sequential(
nn.Linear(N, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
def forward(self, x):
dist = nn_dist(x - torch.mean(x, dim=1, keepdim=True)).float() # B x N x N
if self.max_dist is not None:
max_dist_fill = torch.ones_like(dist) * self.max_dist
dist = torch.where(dist > self.max_dist, max_dist_fill, dist)
x = self.fc(dist / torch.max(dist)) # B x N x d
return x
class AngleEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, N, embed_dim, angle_k=None):
super(AngleEncoder, self).__init__()
self.angle_k = angle_k
self.fc = nn.Sequential(
nn.Linear(N, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
self.max_pool = nn.AdaptiveMaxPool1d(1)
def forward(self, x): | x = F.normalize(nn_angle(x, self.angle_k), dim=-1) # b x k x m x m | 4 | 2023-11-02 13:52:20+00:00 | 8k |
gchada/ROAM | sim/rail_walker_interface/environment/env.py | [
{
"identifier": "BaseWalker",
"path": "sim/rail_walker_interface/robot/robot.py",
"snippet": "class BaseWalker(Generic[_ObsT]):\n def __init__(\n self, \n name: Optional[str] = \"robot\", \n Kp: float = 5,\n Kd: float = 1,\n force_real_control_timestep : bool = False,\n limit_action_range : float = 1.0,\n power_protect_factor : float = 0.1\n ):\n assert limit_action_range > 0 and limit_action_range <= 1.0\n self.name = name\n self.Kp = Kp\n self.Kd = Kd\n self.force_real_control_timestep = force_real_control_timestep\n self._last_control_t = 0.0\n self.limit_action_range = limit_action_range\n self._power_protect_factor = power_protect_factor\n\n @property\n def is_real_robot(self) -> bool:\n return False\n\n @property\n def power_protect_factor(self) -> float:\n return self._power_protect_factor\n \n @power_protect_factor.setter\n def power_protect_factor(self, value: float) -> None:\n assert value >= 0 and value <= 1.0\n self._power_protect_factor = value\n\n \"\"\"\n The control_timestep is the time interval between two consecutive model control actions.\n \"\"\"\n @property\n def control_timestep(self) -> float:\n pass\n \n @property\n def action_interpolation(self) -> bool:\n pass\n\n \"\"\"\n The control_subtimestep is the time interval between two consecutive internal control actions. It will also be the physics timestep if in simulation.\n \"\"\"\n @property\n def control_subtimestep(self) -> float:\n pass\n\n def receive_observation(self) -> bool:\n pass\n\n @property\n def joint_qpos_init(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_sitting(self) -> np.ndarray:\n pass\n\n @cached_property\n def joint_qpos_crouch(self) -> np.ndarray:\n return (self.joint_qpos_init + self.joint_qpos_sitting) / 2.0\n\n \"\"\"\n This property will be used to determine the standing range of qpos of the robot.\n \"\"\"\n @property\n def joint_qpos_offset(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_mins(self) -> np.ndarray:\n pass\n\n @property\n def joint_qpos_maxs(self) -> np.ndarray:\n pass\n\n def reset(self) -> None:\n pass\n\n def get_3d_linear_velocity(self) -> np.ndarray:\n pass\n\n def get_3d_local_velocity(self) -> np.ndarray:\n pass\n\n def get_3d_angular_velocity(self) -> np.ndarray:\n pass\n\n def get_framequat_wijk(self) -> np.ndarray:\n pass\n\n def get_roll_pitch_yaw(self) -> np.ndarray:\n pass\n\n def get_last_observation(self) -> Optional[_ObsT]:\n pass\n\n def get_3d_acceleration_local(self) -> np.ndarray:\n pass\n\n def get_joint_qpos(self) -> np.ndarray:\n pass\n\n def get_joint_qvel(self) -> np.ndarray:\n pass\n\n def get_joint_qacc(self) -> np.ndarray:\n pass\n\n def get_joint_torques(self) -> np.ndarray:\n pass\n\n def _apply_action(self, action: np.ndarray) -> bool:\n pass\n\n def close(self) -> None:\n pass\n\n def __del__(self):\n self.close()\n \n @property\n def action_qpos_mins(self) -> np.ndarray:\n return (self.joint_qpos_mins - self.joint_qpos_init) * self.limit_action_range + self.joint_qpos_init\n \n @property\n def action_qpos_maxs(self) -> np.ndarray:\n return (self.joint_qpos_maxs - self.joint_qpos_init) * self.limit_action_range + self.joint_qpos_init\n\n def apply_action(self, action: np.ndarray) -> bool:\n action = np.clip(action, self.action_qpos_mins, self.action_qpos_maxs)\n \n if not self.force_real_control_timestep:\n return self._apply_action(action)\n else:\n t = time.time()\n dt = t - self._last_control_t\n if dt >= self.control_timestep:\n self._last_control_t = t\n return self._apply_action(action)\n else:\n time_to_sleep = self.control_timestep - dt\n time.sleep(time_to_sleep)\n self._last_control_t = t 
+ time_to_sleep\n return self._apply_action(action)\n\n def can_apply_action(self) -> bool:\n t = time.time()\n dt = t - self._last_control_t\n if (not self.force_real_control_timestep) or dt >= self.control_timestep:\n return True\n else:\n return False\n\n def async_apply_action(self, action: np.ndarray) -> bool:\n if self.can_apply_action():\n self._last_control_t = time.time()\n return self._apply_action(action)\n else:\n return False\n\n @cached_property\n def joint_nums(self) -> int:\n return len(self.joint_qpos_init)\n \n @cached_property\n def action_spec(self) -> gym.spaces.Box:\n return gym.spaces.Box(\n low=self.joint_qpos_mins, \n high=self.joint_qpos_maxs, \n shape=(self.joint_nums,),\n dtype=np.float32\n )\n\n def unwrapped(self):\n return self"
},
{
"identifier": "JoystickPolicy",
"path": "sim/rail_walker_interface/joystick_policy/joystick_policy.py",
"snippet": "class JoystickPolicy:\n def __init__(\n self,\n robot: BaseWalker,\n reward_provider: JoystickPolicyRewardProvider,\n target_yaw_provider: JoystickPolicyTargetProvider,\n termination_providers: list[JoystickPolicyTerminationConditionProvider],\n truncation_providers: list[JoystickPolicyTerminationConditionProvider],\n resetters: list[JoystickPolicyResetter],\n initializers: list[JoystickPolicyResetter] = [],\n target_observable: Optional[JoystickPolicyTargetObservable] = None,\n enabled_observables: list[str] = [\n \"joints_pos\",\n \"joints_vel\",\n \"imu\",\n \"sensors_local_velocimeter\",\n \"torques\",\n \"foot_contacts\",\n ],\n lock_target: bool = False,\n enable_target_custom_obs=True\n ):\n self.robot = robot\n self.reward_provider = reward_provider\n self.target_yaw_provider = target_yaw_provider\n self.termination_providers = termination_providers\n self.truncation_providers = truncation_providers\n self.resetters = resetters\n self.initializers = initializers\n self.target_observable = target_observable\n self.enabled_observables = enabled_observables\n self.lock_target = lock_target\n self.enable_target_custom_obs = enable_target_custom_obs\n\n # Temporary Variables\n self._step_target_qpos = self.robot.get_joint_qpos()\n\n # Set up task-specific variables\n self._target_goal_world_delta = np.zeros(2)\n self._target_goal_local = np.zeros(2)\n self._target_yaw = 0.0\n self._target_delta_yaw = 0.0\n self._target_velocity = 0.0\n self._target_custom_data = None\n self._rew_step = 0.0\n self._info_dict = {}\n self._has_after_after_step = False\n self._termination_reason: Optional[JoystickPolicyTerminationConditionProvider] = None\n self._truncation_reason: Optional[JoystickPolicyTerminationConditionProvider] = None\n self._inited = False\n\n @property\n def has_after_after_step(self) -> bool:\n return self._has_after_after_step\n\n @property\n def control_timestep(self) -> float:\n return self.robot.control_timestep\n\n @control_timestep.setter\n def control_timestep(self, value: float) -> None:\n self.robot.control_timestep = value\n\n @property\n def last_info(self) -> dict[str, Any]:\n return self._info_dict.copy()\n\n @property\n def control_subtimestep(self) -> float:\n return self.robot.control_subtimestep\n\n @control_subtimestep.setter\n def control_subtimestep(self, value: float) -> None:\n self.robot.control_subtimestep = value\n\n @property\n def target_yaw(self) -> float:\n return self._target_yaw\n\n @property\n def target_delta_yaw(self) -> float:\n return self._target_delta_yaw\n\n @property\n def target_goal_world_delta(self) -> np.ndarray:\n return self._target_goal_world_delta.copy()\n\n @property\n def target_goal_local(self) -> np.ndarray:\n return self._target_goal_local.copy()\n\n @property\n def target_custom_data(self) -> Optional[Any]:\n return self._target_custom_data\n\n @property\n def target_goal_world_delta_unit(self) -> np.ndarray:\n norm_goal = np.linalg.norm(self._target_goal_world_delta)\n if norm_goal == 0.0:\n return np.zeros(2)\n else:\n return self._target_goal_world_delta / norm_goal\n\n @property\n def target_goal_local_unit(self) -> np.ndarray:\n norm_goal = np.linalg.norm(self._target_goal_local)\n if norm_goal == 0.0:\n return np.zeros(2)\n else:\n return self._target_goal_local / norm_goal\n\n def __update_target(self) -> float:\n new_target_goal_world_delta = self.target_yaw_provider.get_target_goal_world_delta(self.robot)[:2]\n new_target_velocity = self.target_yaw_provider.get_target_velocity(self.robot)\n _, _, yaw = 
self.robot.get_roll_pitch_yaw()\n inv_rotation_mat = np.array([\n [np.cos(yaw), np.sin(yaw)],\n [-np.sin(yaw), np.cos(yaw)]\n ])\n new_target_goal_local = inv_rotation_mat @ new_target_goal_world_delta\n\n new_target_yaw = np.arctan2(new_target_goal_world_delta[1], new_target_goal_world_delta[0]) if np.linalg.norm(\n new_target_goal_world_delta) > 0.0 else 0.0\n new_target_delta_yaw = normalize_rad(new_target_yaw - self.robot.get_roll_pitch_yaw()[2])\n change_in_abs_target_delta_yaw = self.__get_change_in_abs_target_delta_yaw()\n\n self._info_dict[\"target_yaw\"] = new_target_yaw\n self._info_dict[\"target_delta_yaw\"] = new_target_delta_yaw\n self._info_dict[\"target_goal_local_x\"] = new_target_goal_local[0]\n self._info_dict[\"target_goal_local_y\"] = new_target_goal_local[1]\n self._info_dict[\"target_goal_world_delta_x\"] = new_target_goal_world_delta[0]\n self._info_dict[\"target_goal_world_delta_y\"] = new_target_goal_world_delta[1]\n self._info_dict[\"change_in_abs_target_delta_yaw\"] = change_in_abs_target_delta_yaw\n self._info_dict[\"abs_target_delta_yaw\"] = np.abs(new_target_delta_yaw)\n self._info_dict[\"target_velocity\"] = new_target_velocity\n\n self._target_yaw = new_target_yaw\n self._target_delta_yaw = new_target_delta_yaw\n self._target_goal_local = new_target_goal_local\n self._target_goal_world_delta = new_target_goal_world_delta\n self._target_custom_data = self.target_yaw_provider.get_target_custom_data()\n self._target_velocity = new_target_velocity\n return change_in_abs_target_delta_yaw\n\n def __get_change_in_abs_target_delta_yaw(self) -> float:\n new_target_delta_yaw = normalize_rad(self.target_yaw - self.robot.get_roll_pitch_yaw()[2])\n change_in_abs_target_delta_yaw = np.abs(new_target_delta_yaw) - np.abs(self._target_delta_yaw)\n return change_in_abs_target_delta_yaw\n\n def before_step(\n self,\n action: np.ndarray,\n random_state: np.random.RandomState\n ):\n self._step_target_qpos = action\n self.robot.apply_action(action)\n\n def get_reward(\n self\n ):\n return self._rew_step\n\n def get_reward_final(self):\n return self._rew_step_final\n\n def after_step(\n self,\n random_state: np.random.RandomState\n ) -> dict[str, Any]:\n self._info_dict = {}\n self.robot.receive_observation()\n\n # Update the target yaw\n self.target_yaw_provider.step_target(\n self.robot,\n self._info_dict,\n random_state\n )\n self._has_after_after_step = self.target_yaw_provider.has_target_changed()\n if not self.lock_target and self._has_after_after_step:\n change_in_abs_target_delta_yaw = self.after_after_step(\n random_state\n )\n else:\n change_in_abs_target_delta_yaw = self.__update_target()\n\n # Gather info about velocity\n robot_v = self.robot.get_3d_linear_velocity()\n robot_v_norm = np.linalg.norm(robot_v)\n robot_v_to_goal = np.dot(\n robot_v[:2], self.target_goal_world_delta_unit\n )\n robot_v_local = self.robot.get_3d_local_velocity()\n robot_rpy = self.robot.get_roll_pitch_yaw()\n self._info_dict[\"velocity_norm\"] = robot_v_norm\n self._info_dict[\"velocity_to_goal\"] = robot_v_to_goal\n self._info_dict[\"velocity_local_x\"] = robot_v_local[0]\n self._info_dict[\"velocity_local_y\"] = robot_v_local[1]\n self._info_dict[\"velocity_local_z\"] = robot_v_local[2]\n self._info_dict[\"roll\"] = robot_rpy[0]\n self._info_dict[\"pitch\"] = robot_rpy[1]\n self._info_dict[\"yaw\"] = robot_rpy[2]\n self._info_dict[\"joint_torques\"] = np.mean(np.abs(self.robot.get_joint_torques()))\n self._info_dict[\"joint_qvels\"] = np.mean(np.abs(self.robot.get_joint_qvel()))\n 
self._info_dict[\"joint_qaccs\"] = np.mean(np.abs(self.robot.get_joint_qacc()))\n self._info_dict[\"joint_velocities\"] = np.mean(np.abs(self.robot.get_joint_qvel()))\n if hasattr(self.robot, \"get_foot_force\"):\n foot_force: np.ndarray = self.robot.get_foot_force()\n if foot_force.shape == (4,):\n foot_force_names = [\"FR\", \"FL\", \"RR\", \"RL\"]\n else:\n foot_force_names = list(range(foot_force.shape[0]))\n for i in range(len(foot_force_names)):\n self._info_dict[\"foot_force_\" + foot_force_names[i]] = foot_force[i]\n\n self.reward_provider.step_reward(\n self.robot,\n self._step_target_qpos,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n reward_perstep = self.reward_provider.get_reward()\n # assert reward_perstep is not None and reward_perstep != np.nan\n self._info_dict[\"reward_perstep\"] = reward_perstep\n self._rew_step = reward_perstep\n self._rew_step_final = self.reward_provider.get_reward_final()\n\n # Step the target yaw observable\n if self.target_observable is not None:\n self.target_observable.step_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n\n # Step resetters\n for resetter in self.resetters:\n resetter.step_resetter(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n\n # Step termination providers\n for termination_provider in self.termination_providers:\n termination_provider.step_termination_condition(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n if termination_provider.should_terminate():\n print(\"Termination provider\", termination_provider, \"terminated the episode\")\n self._termination_reason = termination_provider\n break\n\n # Step truncaiton providers\n for truncation_provider in self.truncation_providers:\n truncation_provider.step_termination_condition(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n if truncation_provider.should_terminate():\n print(\"Truncation provider\", truncation_provider, \"truncated the episode\")\n self._truncation_reason = truncation_provider\n break\n\n return self._info_dict.copy()\n\n def after_after_step(\n self,\n random_state: np.random.RandomState\n ):\n if self._has_after_after_step:\n self.target_yaw_provider.after_step_target(\n self.robot,\n self._info_dict,\n random_state\n )\n change_in_abs_target_delta_yaw = self.__update_target()\n robot_v = self.robot.get_3d_linear_velocity()\n robot_v_to_goal = 
np.dot(\n robot_v[:2], self.target_goal_world_delta_unit\n )\n # Step the target yaw observable\n if self.target_observable is not None:\n self.target_observable.step_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n robot_v_to_goal,\n change_in_abs_target_delta_yaw,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._info_dict,\n random_state\n )\n\n # self.reward_provider.step_ex(\n # self.robot,\n # self.target_goal_world_delta,\n # self.target_goal_local,\n # self.target_yaw,\n # self.target_delta_yaw,\n # robot_v_to_goal,\n # change_in_abs_target_delta_yaw,\n # self._target_custom_data,\n # self.enable_target_custom_obs,\n # self._info_dict,\n # random_state\n # )\n # reward_perstep = self.reward_provider.get_reward()\n # #assert reward_perstep is not None and reward_perstep != np.nan\n # self._rew_step = reward_perstep\n\n self._has_after_after_step = False\n return change_in_abs_target_delta_yaw\n else:\n return 0.0\n\n def reset(self, random_state: np.random.RandomState) -> dict[str, Any]:\n self.robot.receive_observation()\n # Reset the info dict\n self._info_dict = {}\n\n # Reset the task-specific variables\n self._target_yaw = 0.0\n self._target_delta_yaw = 0.0\n self._has_after_after_step = False\n\n if not self._inited:\n self._inited = True\n for initializer in self.initializers:\n initializer.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # call the resetters\n for resetter in self.resetters:\n resetter.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # Reset the target yaw provider\n self.target_yaw_provider.reset_target(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n self.__update_target()\n\n # Reset target yaw obs\n if self.target_observable is not None:\n self.target_observable.reset_target_obs(\n self.robot,\n self.target_goal_world_delta,\n self.target_goal_local,\n self.target_yaw,\n self.target_delta_yaw,\n self._target_velocity,\n self._info_dict,\n self._target_custom_data,\n self.enable_target_custom_obs,\n self._termination_reason,\n random_state\n )\n\n # Reset reward provider\n self.reward_provider.reset_reward(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # Reset termination providers\n for termination_provider in self.termination_providers:\n termination_provider.reset_termination_condition(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n # Reset truncation providers\n for truncation_provider in self.truncation_providers:\n truncation_provider.reset_termination_condition(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n self._termination_reason = None\n self._truncation_reason = None\n self._rew_step = 0.0\n\n # Reset the robot\n self.robot.reset()\n self.robot.receive_observation()\n\n for resetter in self.resetters:\n if hasattr(resetter, \"last_position\"):\n resetter.perform_reset(\n self.robot,\n self._info_dict,\n self._termination_reason,\n random_state\n )\n\n return self._info_dict.copy()\n\n def should_terminate(self) -> bool:\n return self._termination_reason is not None\n\n def should_truncate(self) -> bool:\n return self._truncation_reason is not None"
}
] | from ..robot import BaseWalker
from ..joystick_policy.joystick_policy import JoystickPolicy | 5,617 |
class WalkerEnvironment:
    @property
    def robot(self) -> BaseWalker:
        pass

class JoystickEnvironment:
    @property
|
class WalkerEnvironment:
    @property
    def robot(self) -> BaseWalker:
        pass

class JoystickEnvironment:
    @property | def joystick_policy(self) -> JoystickPolicy: | 1 | 2023-11-02 23:21:38+00:00 | 8k
UMass-Foundation-Model/genome | main.py | [
{
"identifier": "parse_opt",
"path": "param.py",
"snippet": "def parse_opt():\n\n parser = argparse.ArgumentParser()\n # Data input settings\n\n # Dataset and Image\n parser.add_argument('--dataset', type=str, default=\"gqa\", help='') # Pending\n\n parser.add_argument('--ann_path', type=str, default=\"\", help='')\n parser.add_argument('--image_path', type=str, default=\"\", help='')\n parser.add_argument('--dataset_dir', type=str, default=\"\", help='')\n parser.add_argument('--output_dir', type=str, default=\"\", help='')\n parser.add_argument('--reuse_dir', type=str, default=\"\", help='')\n parser.add_argument('--split', type=str, default=\"test\", help='')\n\n parser.add_argument('--last_stage_output_dir', type=str, default=\"\", help='')\n parser.add_argument('--threshold', type=float, default=0.5, help='')\n\n parser.add_argument('--coco_dir', type=str, default=\"\", help='')\n\n parser.add_argument('--temperature', type=float, default=0, help='')\n parser.add_argument('--begin', type=int, default=0, help='')\n\n # Bool\n parser.add_argument('--use_new_module', action='store_true', default=False)\n parser.add_argument('--save_output', action='store_true', default=False)\n parser.add_argument('--add_cases', action='store_true', default=False)\n parser.add_argument('--split_cases', action='store_true', default=False)\n parser.add_argument('--save_all_module', action='store_true', default=False)\n parser.add_argument('--save_case_result', action='store_true', default=False)\n parser.add_argument('--save_prog_state', action='store_true', default=False)\n\n # Prompt\n parser.add_argument('--learning_prompt_path', type=str, help=\"\", default='./prompts/learning_prompt_debug.txt')\n parser.add_argument('--module_make_prompt_path', type=str, help=\"\", default='./prompts/module_make_prompt.txt')\n parser.add_argument('--online_prompt_path', type=str, help=\"\", default='./prompts/learning_prompt_online.txt')\n parser.add_argument('--offline_prompt_path', type=str, help=\"\", default='./prompts/learning_prompt_offlinev2.txt')\n parser.add_argument('--inference_prompt_path', type=str, help=\"\", default='./prompts/learning_prompt_inference.txt')\n parser.add_argument('--training_prompt_path', type=str, help=\"\", default='./prompts/module_debug_train_prompt.txt')\n\n parser.add_argument('--module_debug_init_prompt_path', type=str, help=\"\", default='./prompts/module_debug_init_prompt.txt')\n parser.add_argument('--module_debug_execute_error_prompt_path', type=str, help=\"\", default='./prompts/module_debug_execute_error_prompt.txt')\n parser.add_argument('--module_debug_execute_wrong_prompt_path', type=str, help=\"\", default='./prompts/module_debug_execute_wrong_prompt.txt')\n parser.add_argument('--merge_prompt_path', type=str, help=\"\", default='./prompts/merge_prompt.txt')\n\n # Save\n parser.add_argument('--module_save_dir', type=str, help=\"\", default='output/gqa_train_eval1') # Pending need to specify\n # Debug\n parser.add_argument('--test_num', type=int, help=\"\", default=3) # test 100 samples or 105\n\n # Model and Key Hyperparameter\n parser.add_argument('--stop_token', type=str, default=\"\", help='')\n parser.add_argument('--model', type=str, help=\"GPT Model\", default='gpt-3.5-turbo-16k') # Pending \"gpt-3.5-turbo-16k-0613\" or text-davinci-003\n parser.add_argument('--stage', type=float, help=\"\", default=0) # Pending\n\n # parse\n args = parser.parse_args()\n opt = vars(args)\n pprint('parsed input parameters:')\n pprint(opt)\n return args, opt"
},
{
"identifier": "get_samples",
"path": "engine/dataset.py",
"snippet": "def get_samples(args):\n samples = []\n if args.stage == 1 or args.stage == 3:\n dataset_class = get_dataset(args.dataset)\n return dataset_class.get_samples(args)\n elif args.stage == 1.5 or args.stage == 2:\n last_stage_output_dir = args.last_stage_output_dir\n file_list = os.listdir(last_stage_output_dir)\n for filename in file_list:\n if 'json' in filename:\n file_path = os.path.join(last_stage_output_dir, filename)\n last_stage_output_dict = json.load(open(file_path))\n last_stage_output_dict = strip_dict(last_stage_output_dict)\n samples.append(last_stage_output_dict)\n\n return samples"
},
{
"identifier": "get_module_list",
"path": "engine/util.py",
"snippet": "def get_module_list(args):\n if not args.use_new_module:\n return []\n module_save_dir = args.module_save_dir\n if os.path.isdir(module_save_dir):\n file_list = os.listdir(module_save_dir)\n module_name_dict = {}\n for filename in file_list:\n # relieve the name constraint\n if 'MODULE' in filename and 'json' in filename or 'json' in filename:\n file_path = os.path.join(module_save_dir, filename)\n try:\n module_dict = json.load(open(file_path))\n module_dict = strip_dict(module_dict)\n name_key = \"module_name\" if \"module_name\" in module_dict else \"name\"\n module_name = module_dict[name_key]\n module_dict['module_name'] = module_name\n if 'module_program' not in module_dict:\n module_dict['module_program'] = module_dict['module']\n #if 'annotations' not in module_dict:\n # module_dict['annotations'] = module_dict['program']\n if module_name not in module_name_dict or \\\n module_dict['test_accuracy'] > module_name_dict[module_name]['test_accuracy']:\n module_name_dict[module_name] = module_dict\n except:\n import pdb\n pdb.set_trace()\n \n module_list = []\n for module_dict in module_name_dict.values():\n if 'test_accuracy' not in module_dict:\n module_list.append(module_dict)\n elif module_dict['test_accuracy'] >= args.threshold:\n module_list.append(module_dict)\n else:\n print(\"There is no available module directory: %s\"%(module_save_dir))\n module_list = []\n return module_list"
},
{
"identifier": "save_output",
"path": "engine/util.py",
"snippet": "def save_output(args, output_dict, filename=None):\n output_dir = args.output_dir\n if args.stage == 1:\n output_path = os.path.join(output_dir, output_dict['annotations'][0]['id'] + '.json')\n json.dump(output_dict, open(output_path, 'w'), indent=2)\n elif args.stage == 1.5:\n if args.split_cases:\n module_head_list = output_dict.pop('module_head_list')\n for index, module_head in enumerate(module_head_list):\n output_dict['module_head'] = module_head\n output_path = os.path.join(output_dir, output_dict['annotations'][index]['id'] + '.json')\n json.dump(output_dict, open(output_path, 'w'), indent=2)\n else:\n output_path = os.path.join(output_dir, output_dict['annotations'][0]['id'] + '.json')\n json.dump(output_dict, open(output_path, 'w'), indent=2)\n elif args.stage == 2:\n if filename is None:\n filename = 'MODULE_' + output_dict['module_name'] + '.json'\n output_path = os.path.join(output_dir, filename)\n json.dump(output_dict, open(output_path, 'w'), indent=2)\n elif args.stage == 3:\n output_path = os.path.join(output_dir, 'result_' + output_dict['id'] + '.json')\n json.dump(output_dict, open(output_path, 'w'), indent=2)\n pass"
},
{
"identifier": "pre_process",
"path": "engine/util.py",
"snippet": "def pre_process(args):\n if args.save_output:\n if not os.path.isdir(args.output_dir):\n os.makedirs(args.output_dir)\n init_gpt(args)"
},
{
"identifier": "post_process",
"path": "engine/util.py",
"snippet": "def post_process(args):\n if args.save_output:\n if args.stage == 3:\n dataset_class = get_dataset(args.dataset)\n dataset_class.post_process(args)"
},
{
"identifier": "get_response",
"path": "engine/gpt.py",
"snippet": "def get_response(args, prompt_to_gpt):\n generator = get_generator(args)\n response, _ = generator.generate(prompt_to_gpt)\n response = response.replace('\\t', ' ')\n return response"
},
{
"identifier": "parse_response",
"path": "engine/gpt.py",
"snippet": "def parse_response(args, response: str):\n if args.stage == 1:\n if '\"\"\"' not in response:\n return False, None, None\n module_dict = {}\n\n\n ''' copied code '''\n prog_lines = response.split(\"\\n\")\n is_prog = False\n in_comment = False\n module_head = []\n for line in prog_lines:\n if '():' in line:\n is_prog = True\n if is_prog:\n module_head.append(line)\n if '\"\"\"' in line:\n if not in_comment:\n in_comment = True\n else:\n break\n module_name = module_head[0].split('class')[1].split('(')[0].strip()\n module_head = \"\\n\".join(module_head)\n if module_head.count(\"class\") > 1:\n module_head = module_head.rsplit(\"class\", 1)[0]\n program = [(line.strip() if '=' in line else '')\n for line in module_head.split(\"\\n\")]\n program = \"\\n\".join(program).strip()\n\n\n module_dict['module_name'] = module_name\n module_dict['module_head'] = module_head\n return True, module_dict, program\n elif args.stage == 2:\n if 'step_name' in response:\n return '\\n step_name' + response.split('step_name', 1)[1]\n else:\n return response\n elif args.stage == 1.5 or args.stage == 3:\n program = [(line.strip() if '=' in line else '')\n for line in response.split(\"\\n\")]\n program = \"\\n\".join(program).strip()\n return program"
},
{
"identifier": "get_prompt",
"path": "engine/prompt.py",
"snippet": "def get_prompt(args, module_list=[], type='default'):\n if args.stage == 1:\n prompt_path = args.online_prompt_path\n prompt = open(prompt_path).read().strip()\n\n new_module_head_list = []\n new_module_list = []\n for module_dict in module_list:\n new_module_head_list.append(module_dict['module_head'])\n new_module_list.append(module_dict['module_name'])\n\n prompt = prompt.replace('__NEW_MODULE_HEAD_LIST__', '\\n'.join(new_module_head_list))\n prompt = prompt.replace('__NEW_MODULE_LIST__', '\\n'.join(new_module_list))\n\n return prompt\n elif args.stage == 1.5:\n prompt_path = args.merge_prompt_path\n prompt = open(prompt_path).read().strip()\n\n new_module_head_list = []\n new_module_list = []\n for module_dict in module_list:\n new_module_head_list.append(module_dict['module_head'])\n new_module_list.append(module_dict['module_name'])\n\n prompt = prompt.replace('__NEW_MODULE_HEAD_LIST__', '\\n'.join(new_module_head_list))\n prompt = prompt.replace('__NEW_MODULE_LIST__', '\\n'.join(new_module_list))\n\n return prompt\n elif args.stage == 2:\n if type == 'default':\n prompt_path = args.module_make_prompt_path\n elif type == 'debug_init':\n prompt_path = args.module_debug_init_prompt_path\n elif type == 'debug_execute_error':\n prompt_path = args.module_debug_execute_error_prompt_path\n elif type == 'debug_execute_wrong':\n prompt_path = args.module_debug_execute_wrong_prompt_path\n prompt = open(prompt_path).read().strip()\n return prompt\n elif args.stage == 3:\n prompt_path = args.inference_prompt_path\n prompt = open(prompt_path).read().strip()\n\n new_module_example_list = []\n new_module_list = []\n for module_dict in module_list:\n if \"annotations\" not in module_dict:\n print(\"No examplar cases\\n\")\n continue\n for ann in module_dict[\"annotations\"]:\n new_module_example_list.append(\n f'Question: {ann[\"question\"]}\\n' +\n 'Program:\\n' +\n ann['high_level_program'])\n new_module_list.append(module_dict['module_name'])\n\n prompt = prompt.replace('__NEW_MODULE_EXAMPLE_LIST__', '\\n'.join(new_module_example_list))\n prompt = prompt.replace('__NEW_MODULE_LIST__', '\\n'.join(new_module_list))\n\n return prompt"
},
{
"identifier": "format_prompt",
"path": "engine/prompt.py",
"snippet": "def format_prompt(args, prompt, module=None, ann=None, message=None, pred_answer=None, type='default'):\n if args.stage == 1 or args.stage == 3:\n prompt = prompt.replace('__INSERT_NEW_QUESTION__', ann['question'])\n return prompt\n elif args.stage == 1.5:\n prompt = prompt.replace('__INSERT_NEW_QUESTION__', ann['question'])\n prompt = prompt.replace('__MODULE_NAME__', module['module_name'])\n prompt = prompt.replace('__MODULE_HEAD_HALF__', module['module_head_half'])\n return prompt\n elif args.stage == 2:\n if type == 'default':\n prompt = prompt.replace('__MODULE_NAME__', module['module_name'])\n prompt = prompt.replace('__MODULE_HEAD__', module['module_head'])\n return prompt\n elif type == 'debug_init':\n prompt = prompt.replace('__MODULE_NAME__', module['module_name'])\n prompt = prompt.replace('__MODULE_HEAD__', module['module_head'])\n prompt = prompt.replace('__DEBUG_MESSAGE__', message)\n return prompt\n elif type == 'debug_execute_error':\n prompt = prompt.replace('__MODULE_NAME__', module['module_name'])\n prompt = prompt.replace('__MODULE_HEAD__', module['module_head'])\n prompt = prompt.replace('__DEBUG_MESSAGE__', message)\n prompt = prompt.replace('__HIGH_LEVEL_PROGRAM__', ann['high_level_program'])\n return prompt\n elif type == 'debug_execute_wrong':\n prompt = prompt.replace('__MODULE_NAME__', module['module_name'])\n prompt = prompt.replace('__MODULE_HEAD__', module['module_head'])\n prompt = prompt.replace('__ANSWER__', ann['answer'])\n if not isinstance(pred_answer, str):\n pred_answer = 'not a string'\n prompt = prompt.replace('__OUTPUT__', pred_answer)\n prompt = prompt.replace('__HIGH_LEVEL_PROGRAM__', ann['high_level_program'])\n return prompt"
},
{
"identifier": "create_interpreter",
"path": "engine/interpreter.py",
"snippet": "def create_interpreter(args):\n dataset_class = get_dataset(args.dataset)\n return dataset_class.get_interpreter(args)"
},
{
"identifier": "create_module_instance",
"path": "engine/interpreter.py",
"snippet": "def create_module_instance(args, interpreter, module_dict):\n\n module_name = module_dict['module_name']\n module_prog = module_dict['module_program']\n\n ''' copied code '''\n try:\n exec(module_prog, globals())\n interpreter.add_step_interpreter(\n module_name.upper(), eval(module_name + '()', globals()))\n print(\"successfully intialize the module %s!\" %\n module_name.upper())\n if \"VERIFY\" in module_name and \"META_VERIFY\" in interpreter.step_interpreters:\n if not hasattr(interpreter.step_interpreters['META_VERIFY'], 'sub_module_dict'):\n interpreter.step_interpreters['META_VERIFY'].sub_module_dict = {}\n interpreter.step_interpreters['META_VERIFY'].sub_module_dict[module_name] = interpreter.step_interpreters[module_name]\n if \"COMPARE\" in module_name and \"META_COMPARE\" in interpreter.step_interpreters:\n if not hasattr(interpreter.step_interpreters['META_COMPARE'], 'sub_module_dict'):\n interpreter.step_interpreters['META_COMPARE'].sub_module_dict = {}\n interpreter.step_interpreters['META_COMPARE'].sub_module_dict[module_name] = interpreter.step_interpreters[module_name]\n if \"SORT_SPATIAL\" in module_name and \"SORT_SPATIAL_OBJ\" in interpreter.step_interpreters:\n if not hasattr(interpreter.step_interpreters['SORT_SPATIAL_OBJ'], 'sub_module_dict'):\n interpreter.step_interpreters['SORT_SPATIAL_OBJ'].sub_module_dict = {}\n interpreter.step_interpreters['SORT_SPATIAL_OBJ'].sub_module_dict[module_name] = interpreter.step_interpreters[module_name]\n return interpreter, None\n except Exception as e:\n print(\"ERROR when creating instance!\")\n traceback_message = traceback.format_exc()\n print(traceback_message)\n error_line = None\n for traceback_line in traceback_message.splitlines():\n if 'File \"<string>\"' in traceback_line:\n error_line = traceback_line.strip()\n line_message = \"\"\n if error_line:\n lineno = int(error_line.split(\"line \")[1].split(',')[0])\n module_prog_lines = module_prog.splitlines()\n class_line = module_prog_lines[lineno - 1].strip(\n ) if lineno <= len(module_prog_lines) else None\n if class_line:\n error_line = error_line.replace(\n 'File \"<string>\"', f'Class {module_name}')\n line_message = error_line + '\\n ' + class_line + '\\n'\n error_message = \"\"\n for traceback_line in traceback_message.splitlines():\n if 'Error:' in traceback_line:\n error_message = traceback_line.strip() + '\\n'\n global_debug_output_exe = \"\\nDebuging message \\n\"\n debug_message = line_message + error_message\n global_debug_output_exe += debug_message\n # module_prog = (first_line + '\\n' +\n # module_prog).replace('\\t', ' ')\n global_debug_output_exe += module_prog\n\n return interpreter, global_debug_output_exe"
},
{
"identifier": "test_on_cases",
"path": "engine/interpreter.py",
"snippet": "def test_on_cases(args, interpreter, test_cases):\n accuracy_list = []\n pred_answer_list = []\n message_list = []\n prog_state_list = []\n for case in test_cases:\n pred_answer, message, prog_state = execute_program(args, interpreter, case)\n accuracy_list.append(verify_answer(args, pred_answer, case))\n pred_answer_list.append(pred_answer)\n message_list.append(message)\n prog_state_list.append(prog_state)\n return accuracy_list, pred_answer_list, message_list, prog_state_list"
}
] | import numpy as np
from tqdm import tqdm
from param import parse_opt
from engine.dataset import get_samples
from engine.util import get_module_list, save_output, pre_process, post_process
from engine.gpt import get_response, parse_response
from engine.prompt import get_prompt, format_prompt
from engine.interpreter import create_interpreter, create_module_instance, test_on_cases | 4,321 |
def stage1(args):
    module_list = get_module_list(args)
    prompt = get_prompt(args, module_list=module_list)
|
def stage1(args):
    module_list = get_module_list(args)
    prompt = get_prompt(args, module_list=module_list) | samples = get_samples(args) | 1 | 2023-11-01 16:39:33+00:00 | 8k
ml4bio/RhoFold | rhofold/model/e2eformer.py | [
{
"identifier": "Linear",
"path": "rhofold/model/primitives.py",
"snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)"
},
{
"identifier": "LayerNorm",
"path": "rhofold/model/primitives.py",
"snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out"
},
{
"identifier": "MSARowAttentionWithPairBias",
"path": "rhofold/model/msa.py",
"snippet": "class MSARowAttentionWithPairBias(MSAAttention):\n \"\"\"\n Implements Algorithm 7.\n \"\"\"\n\n def __init__(self, c_m, c_z, c_hidden, no_heads, inf=1e9):\n \"\"\"\n Args:\n c_m:\n Input channel dimension\n c_z:\n Pair embedding channel dimension\n c_hidden:\n Per-head hidden channel dimension\n no_heads:\n Number of attention heads\n inf:\n Large number used to construct attention masks\n \"\"\"\n super(MSARowAttentionWithPairBias, self).__init__(\n c_m,\n c_hidden,\n no_heads,\n pair_bias=True,\n c_z=c_z,\n inf=inf,\n )"
},
{
"identifier": "MSAColumnAttention",
"path": "rhofold/model/msa.py",
"snippet": "class MSAColumnAttention(nn.Module):\n \"\"\"\n Implements Algorithm 8.\n\n By rights, this should also be a subclass of MSAAttention. Alas,\n most inheritance isn't supported by TorchScript.\n \"\"\"\n\n def __init__(self, c_m, c_hidden, no_heads, inf=1e9):\n \"\"\"\n Args:\n c_m:\n MSA channel dimension\n c_hidden:\n Per-head hidden channel dimension\n no_heads:\n Number of attention heads\n inf:\n Large number used to construct attention masks\n \"\"\"\n super(MSAColumnAttention, self).__init__()\n \n self.c_m = c_m\n self.c_hidden = c_hidden\n self.no_heads = no_heads\n self.inf = inf\n\n self._msa_att = MSAAttention(\n c_in=c_m,\n c_hidden=c_hidden,\n no_heads=no_heads,\n pair_bias=False,\n c_z=None,\n inf=inf,\n )\n\n def forward(self, \n m: torch.Tensor, \n mask: Optional[torch.Tensor] = None, \n chunk_size: Optional[int] = None,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n m:\n [*, N_seq, N_res, C_m] MSA embedding\n mask:\n [*, N_seq, N_res] MSA mask\n chunk_size:\n Size of chunks into which the inputs are split along their\n batch dimensions. A low value decreases memory overhead at the \n cost of slower execution. Chunking is not performed by default.\n \"\"\" \n # [*, N_res, N_seq, C_in]\n m = m.transpose(-2, -3)\n if mask is not None:\n mask = mask.transpose(-1, -2)\n\n m = self._msa_att(m, mask=mask, chunk_size=chunk_size)\n\n # [*, N_seq, N_res, C_in]\n m = m.transpose(-2, -3)\n if mask is not None:\n mask = mask.transpose(-1, -2)\n\n return m"
},
{
"identifier": "OuterProductMean",
"path": "rhofold/model/outer_product_mean.py",
"snippet": "class OuterProductMean(nn.Module):\n \"\"\"\n Implements Algorithm 10.\n \"\"\"\n\n def __init__(self, c_m, c_z, c_hidden, eps=1e-3):\n \"\"\"\n Args:\n c_m:\n MSA embedding channel dimension\n c_z:\n Pair embedding channel dimension\n c_hidden:\n Hidden channel dimension\n \"\"\"\n super(OuterProductMean, self).__init__()\n\n self.c_m = c_m\n self.c_z = c_z\n self.c_hidden = c_hidden\n self.eps = eps\n\n self.layer_norm = nn.LayerNorm(c_m)\n self.linear_1 = Linear(c_m, c_hidden)\n self.linear_2 = Linear(c_m, c_hidden)\n self.linear_out = Linear(c_hidden ** 2, c_z)\n\n def _opm(self, a, b):\n # [*, N_res, N_res, C, C]\n outer = torch.einsum(\"...bac,...dae->...bdce\", a, b)\n\n # [*, N_res, N_res, C * C]\n outer = outer.reshape(outer.shape[:-2] + (-1,))\n\n # [*, N_res, N_res, C_z]\n outer = self.linear_out(outer)\n\n return outer\n\n @torch.jit.ignore\n def _chunk(self, \n a: torch.Tensor, \n b: torch.Tensor, \n chunk_size: int\n ) -> torch.Tensor:\n\n a_reshape = a.reshape((-1,) + a.shape[-3:])\n b_reshape = b.reshape((-1,) + b.shape[-3:])\n out = []\n for a_prime, b_prime in zip(a_reshape, b_reshape):\n outer = chunk_layer(\n partial(self._opm, b=b_prime),\n {\"a\": a_prime},\n chunk_size=chunk_size,\n no_batch_dims=1,\n )\n out.append(outer)\n\n # For some cursed reason making this distinction saves memory\n if(len(out) == 1):\n outer = out[0].unsqueeze(0)\n else:\n outer = torch.stack(out, dim=0)\n\n outer = outer.reshape(a.shape[:-3] + outer.shape[1:])\n\n return outer\n\n def forward(self, \n m: torch.Tensor, \n mask: Optional[torch.Tensor] = None,\n chunk_size: Optional[int] = None,\n inplace_safe: bool = False,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n m:\n [*, N_seq, N_res, C_m] MSA embedding\n mask:\n [*, N_seq, N_res] MSA mask\n Returns:\n [*, N_res, N_res, C_z] pair embedding update\n \"\"\"\n if mask is None:\n mask = m.new_ones(m.shape[:-1])\n\n # [*, N_seq, N_res, C_m]\n ln = self.layer_norm(m)\n\n # [*, N_seq, N_res, C]\n mask = mask.unsqueeze(-1)\n a = self.linear_1(ln) \n a = a * mask\n \n b = self.linear_2(ln) \n b = b * mask\n\n del ln\n\n a = a.transpose(-2, -3)\n b = b.transpose(-2, -3)\n\n if chunk_size is not None:\n outer = self._chunk(a, b, chunk_size)\n else:\n outer = self._opm(a, b)\n\n # [*, N_res, N_res, 1]\n norm = torch.einsum(\"...abc,...adc->...bdc\", mask, mask)\n norm = norm + self.eps\n\n # [*, N_res, N_res, C_z]\n if(inplace_safe):\n outer /= norm\n else:\n outer = outer / norm\n\n return outer"
},
{
"identifier": "PairTransition",
"path": "rhofold/model/pair.py",
"snippet": "class PairTransition(nn.Module):\n \"\"\"\n Implements Algorithm 15.\n \"\"\"\n\n def __init__(self, c_z, n):\n \"\"\"\n Args:\n c_z:\n Pair transition channel dimension\n n:\n Factor by which c_z is multiplied to obtain hidden channel\n dimension\n \"\"\"\n super(PairTransition, self).__init__()\n\n self.c_z = c_z\n self.n = n\n\n self.layer_norm = LayerNorm(self.c_z)\n self.linear_1 = Linear(self.c_z, self.n * self.c_z)\n self.relu = nn.ReLU()\n self.linear_2 = Linear(self.n * self.c_z, c_z)\n\n def _transition(self, z, mask):\n # [*, N_res, N_res, C_z]\n z = self.layer_norm(z)\n \n # [*, N_res, N_res, C_hidden]\n z = self.linear_1(z)\n z = self.relu(z)\n\n # [*, N_res, N_res, C_z]\n z = self.linear_2(z) * mask\n\n return z\n\n @torch.jit.ignore\n def _chunk(self,\n z: torch.Tensor,\n mask: torch.Tensor,\n chunk_size: int,\n ) -> torch.Tensor:\n return chunk_layer(\n self._transition,\n {\"z\": z, \"mask\": mask},\n chunk_size=chunk_size,\n no_batch_dims=len(z.shape[:-2]),\n )\n\n\n def forward(self, \n z: torch.Tensor, \n mask: Optional[torch.Tensor] = None,\n chunk_size: Optional[int] = None,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n z:\n [*, N_res, N_res, C_z] pair embedding\n Returns:\n [*, N_res, N_res, C_z] pair embedding update\n \"\"\"\n if mask is None:\n mask = z.new_ones(z.shape[:-1])\n\n # [*, N_res, N_res, 1]\n mask = mask.unsqueeze(-1)\n\n if chunk_size is not None:\n z = self._chunk(z, mask, chunk_size)\n else:\n z = self._transition(z=z, mask=mask)\n\n return z"
},
{
"identifier": "TriangleAttention",
"path": "rhofold/model/triangular_attention.py",
"snippet": "class TriangleAttention(nn.Module):\n def __init__(\n self, c_in, c_hidden, no_heads, starting=True, inf=1e9\n ):\n \"\"\"\n Args:\n c_in:\n Input channel dimension\n c_hidden:\n Overall hidden channel dimension (not per-head)\n no_heads:\n Number of attention heads\n \"\"\"\n super(TriangleAttention, self).__init__()\n\n self.c_in = c_in\n self.c_hidden = c_hidden\n self.no_heads = no_heads\n self.starting = starting\n self.inf = inf\n\n self.layer_norm = LayerNorm(self.c_in)\n\n self.linear = Linear(c_in, self.no_heads, bias=False)\n\n self.mha = Attention(\n self.c_in, self.c_in, self.c_in, self.c_hidden, self.no_heads\n )\n\n @torch.jit.ignore\n def _chunk(self,\n x: torch.Tensor,\n biases: List[torch.Tensor],\n chunk_size: int,\n use_memory_efficient_kernel: bool = False,\n inplace_safe: bool = False,\n ) -> torch.Tensor:\n \"triangle! triangle!\"\n mha_inputs = {\n \"q_x\": x,\n \"kv_x\": x,\n \"biases\": biases,\n }\n\n return chunk_layer(\n partial(\n self.mha, \n use_memory_efficient_kernel=use_memory_efficient_kernel,\n ),\n mha_inputs,\n chunk_size=chunk_size,\n no_batch_dims=len(x.shape[:-2]),\n _out=x if inplace_safe else None,\n )\n\n def forward(self, \n x: torch.Tensor, \n mask: Optional[torch.Tensor] = None,\n chunk_size: Optional[int] = None,\n use_memory_efficient_kernel: bool = False,\n inplace_safe: bool = False,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n x:\n [*, I, J, C_in] input tensor (e.g. the pair representation)\n Returns:\n [*, I, J, C_in] output tensor\n \"\"\" \n if mask is None:\n # [*, I, J]\n mask = x.new_ones(\n x.shape[:-1],\n )\n\n if(not self.starting):\n x = x.transpose(-2, -3)\n mask = mask.transpose(-1, -2)\n\n # [*, I, J, C_in]\n x = self.layer_norm(x)\n\n # [*, I, 1, 1, J]\n mask_bias = (self.inf * (mask - 1))[..., :, None, None, :]\n\n # [*, H, I, J]\n triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1))\n\n # [*, 1, H, I, J]\n triangle_bias = triangle_bias.unsqueeze(-4)\n\n biases = [mask_bias, triangle_bias]\n\n if chunk_size is not None:\n x = self._chunk(\n x, \n biases, \n chunk_size, \n use_memory_efficient_kernel=use_memory_efficient_kernel,\n inplace_safe=inplace_safe,\n )\n else:\n x = self.mha(\n q_x=x, \n kv_x=x, \n biases=biases,\n )\n\n if(not self.starting):\n x = x.transpose(-2, -3)\n\n return x"
},
{
"identifier": "TriangleMultiplicationOutgoing",
"path": "rhofold/model/triangular_update.py",
"snippet": "class TriangleMultiplicationOutgoing(TriangleMultiplicativeUpdate):\n \"\"\"\n Implements Algorithm 11.\n \"\"\"\n __init__ = partialmethod(TriangleMultiplicativeUpdate.__init__, _outgoing=True)"
},
{
"identifier": "TriangleMultiplicationIncoming",
"path": "rhofold/model/triangular_update.py",
"snippet": "class TriangleMultiplicationIncoming(TriangleMultiplicativeUpdate):\n \"\"\"\n Implements Algorithm 12.\n \"\"\"\n __init__ = partialmethod(TriangleMultiplicativeUpdate.__init__, _outgoing=False)"
},
{
"identifier": "chunk_layer",
"path": "rhofold/utils/chunk_utils.py",
"snippet": "def chunk_layer(\n layer: Callable,\n inputs: Dict[str, Any],\n chunk_size: int,\n no_batch_dims: int,\n low_mem: bool = False,\n _out: Any = None,\n _add_into_out: bool = False,\n) -> Any:\n \"\"\"\n Implements the \"chunking\" procedure described in section 1.11.8.\n\n Layer outputs and inputs are assumed to be simple \"pytrees,\"\n consisting only of (arbitrarily nested) lists, tuples, and dicts with\n torch.Tensor leaves.\n\n Args:\n layer:\n The layer to be applied chunk-wise\n inputs:\n A (non-nested) dictionary of keyworded inputs. All leaves must\n be tensors and must share the same batch dimensions.\n chunk_size:\n The number of sub-batches per chunk. If multiple batch\n dimensions are specified, a \"sub-batch\" is defined as a single\n indexing of all batch dimensions simultaneously (s.t. the\n number of sub-batches is the product of the batch dimensions).\n no_batch_dims:\n How many of the initial dimensions of each input tensor can\n be considered batch dimensions.\n low_mem:\n Avoids flattening potentially large input tensors. Unnecessary\n in most cases, and is ever so slightly slower than the default\n setting.\n Returns:\n The reassembled output of the layer on the inputs.\n \"\"\"\n if not (len(inputs) > 0):\n raise ValueError(\"Must provide at least one input\")\n\n initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]\n orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])\n\n def _prep_inputs(t):\n if(not low_mem):\n if not sum(t.shape[:no_batch_dims]) == no_batch_dims:\n t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])\n t = t.reshape(-1, *t.shape[no_batch_dims:])\n else:\n t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])\n return t\n\n prepped_inputs = tensor_tree_map(_prep_inputs, inputs)\n prepped_outputs = None\n if(_out is not None):\n reshape_fn = lambda t: t.view([-1] + list(t.shape[no_batch_dims:]))\n prepped_outputs = tensor_tree_map(reshape_fn, _out)\n\n flat_batch_dim = 1\n for d in orig_batch_dims:\n flat_batch_dim *= d\n\n no_chunks = flat_batch_dim // chunk_size + (\n flat_batch_dim % chunk_size != 0\n )\n\n i = 0\n out = prepped_outputs\n for _ in range(no_chunks):\n # Chunk the input\n if(not low_mem):\n select_chunk = (\n lambda t: t[i : i + chunk_size] if t.shape[0] != 1 else t\n )\n else:\n select_chunk = (\n partial(\n _chunk_slice, \n flat_start=i, \n flat_end=min(flat_batch_dim, i + chunk_size), \n no_batch_dims=len(orig_batch_dims)\n )\n )\n\n chunks = tensor_tree_map(select_chunk, prepped_inputs)\n\n # Run the layer on the chunk\n output_chunk = layer(**chunks)\n\n # Allocate space for the output\n if out is None:\n allocate = lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:])\n out = tensor_tree_map(allocate, output_chunk)\n\n # Put the chunk in its pre-allocated space\n out_type = type(output_chunk)\n if out_type is dict:\n def assign(d1, d2):\n for k, v in d1.items():\n if type(v) is dict:\n assign(v, d2[k])\n else:\n if(_add_into_out):\n v[i: i + chunk_size] += d2[k]\n else:\n v[i: i + chunk_size] = d2[k]\n\n assign(out, output_chunk)\n elif out_type is tuple:\n for x1, x2 in zip(out, output_chunk):\n if(_add_into_out):\n x1[i: i + chunk_size] += x2\n else:\n x1[i : i + chunk_size] = x2\n elif out_type is torch.Tensor:\n if(_add_into_out):\n out[i: i + chunk_size] += output_chunk\n else:\n out[i: i + chunk_size] = output_chunk\n else:\n raise ValueError(\"Not supported\")\n\n i += chunk_size\n\n reshape = lambda t: t.view(orig_batch_dims + t.shape[1:])\n out = tensor_tree_map(reshape, 
out)\n\n return out"
},
{
"identifier": "ChunkSizeTuner",
"path": "rhofold/utils/chunk_utils.py",
"snippet": "class ChunkSizeTuner:\n def __init__(self, \n # Heuristically, runtimes for most of the modules in the network \n # plateau earlier than this on all GPUs I've run the model on.\n max_chunk_size=256,\n ):\n self.max_chunk_size = max_chunk_size\n self.cached_chunk_size = None\n self.cached_arg_data = None\n\n def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):\n \n if(min_chunk_size >= self.max_chunk_size):\n return min_chunk_size\n \n candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]\n candidates = [c for c in candidates if c > min_chunk_size]\n candidates = [min_chunk_size] + candidates\n \n def test_chunk_size(chunk_size):\n try:\n with torch.no_grad():\n fn(*args, chunk_size=chunk_size)\n return True\n except RuntimeError:\n return False\n \n min_viable_chunk_size_index = 0\n i = len(candidates) - 1\n while i > min_viable_chunk_size_index:\n viable = test_chunk_size(candidates[i])\n if(not viable):\n i = (min_viable_chunk_size_index + i) // 2\n else:\n min_viable_chunk_size_index = i\n i = (i + len(candidates) - 1) // 2\n \n return candidates[min_viable_chunk_size_index]\n\n def _compare_arg_caches(self, ac1, ac2):\n consistent = True\n for a1, a2 in zip(ac1, ac2):\n assert(type(ac1) == type(ac2))\n if(type(ac1) is list or type(ac1) is tuple):\n consistent &= self._compare_arg_caches(a1, a2)\n elif(type(ac1) is dict):\n a1_items = [\n v for _, v in sorted(a1.items(), key=lambda x: x[0])\n ]\n a2_items = [\n v for _, v in sorted(a2.items(), key=lambda x: x[0])\n ]\n consistent &= self._compare_arg_caches(a1_items, a2_items)\n else:\n consistent &= a1 == a2\n\n return consistent\n\n def tune_chunk_size(self,\n representative_fn: Callable,\n args: Tuple[Any],\n min_chunk_size: int,\n ) -> int:\n consistent = True\n remove_tensors = lambda a: a.shape if type(a) is torch.Tensor else a\n arg_data = tree_map(remove_tensors, args, object) \n if(self.cached_arg_data is not None):\n # If args have changed shape/value, we need to re-tune\n assert(len(self.cached_arg_data) == len(arg_data))\n consistent = self._compare_arg_caches(\n self.cached_arg_data, arg_data\n ) \n else:\n # Otherwise, we can reuse the precomputed value\n consistent = False\n\n if(not consistent):\n self.cached_chunk_size = self._determine_favorable_chunk_size(\n representative_fn,\n args,\n min_chunk_size,\n )\n self.cached_arg_data = arg_data\n\n return self.cached_chunk_size"
},
{
"identifier": "add",
"path": "rhofold/utils/tensor_utils.py",
"snippet": "def add(m1, m2, inplace):\n # The first operation in a checkpoint can't be in-place, but it's\n # nice to have in-place addition during inference. Thus...\n if(not inplace):\n m1 = m1 + m2\n else:\n m1 += m2\n\n return m1"
}
] | import torch
import torch.nn as nn
from typing import Tuple, Sequence, Optional
from functools import partial
from rhofold.model.primitives import Linear, LayerNorm
from rhofold.model.msa import (
MSARowAttentionWithPairBias,
MSAColumnAttention,
)
from rhofold.model.outer_product_mean import OuterProductMean
from rhofold.model.pair import PairTransition
from rhofold.model.triangular_attention import (
TriangleAttention,
)
from rhofold.model.triangular_update import (
TriangleMultiplicationOutgoing,
TriangleMultiplicationIncoming,
)
from rhofold.utils.chunk_utils import chunk_layer, ChunkSizeTuner
from rhofold.utils.tensor_utils import add | 6,534 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MSATransition(nn.Module):
"""
Feed-forward network applied to MSA activations after attention.
Implements Algorithm 9
"""
def __init__(self, c_m, n):
"""
Args:
c_m:
MSA channel dimension
n:
Factor multiplied to c_m to obtain the hidden channel
dimension
"""
super(MSATransition, self).__init__()
self.c_m = c_m
self.n = n
self.layer_norm = LayerNorm(self.c_m)
self.linear_1 = Linear(self.c_m, self.n * self.c_m)
self.relu = nn.ReLU()
self.linear_2 = Linear(self.n * self.c_m, self.c_m)
def _transition(self, m, mask):
m = self.layer_norm(m)
m = self.linear_1(m)
m = self.relu(m)
m = self.linear_2(m) * mask
return m
@torch.jit.ignore
def _chunk(self,
m: torch.Tensor,
mask: torch.Tensor,
chunk_size: int,
) -> torch.Tensor:
return chunk_layer(
self._transition,
{"m": m, "mask": mask},
chunk_size=chunk_size,
no_batch_dims=len(m.shape[:-2]),
)
def forward(
self,
m: torch.Tensor,
mask: Optional[torch.Tensor] = None,
chunk_size: Optional[int] = None,
) -> torch.Tensor:
"""
Args:
m:
[*, N_seq, N_res, C_m] MSA activation
mask:
[*, N_seq, N_res, C_m] MSA mask
Returns:
m:
[*, N_seq, N_res, C_m] MSA activation update
"""
if mask is None:
mask = m.new_ones(m.shape[:-1])
mask = mask.unsqueeze(-1)
if chunk_size is not None:
m = self._chunk(m, mask, chunk_size)
else:
m = self._transition(m, mask)
return m
class E2EformerBlockCore(nn.Module):
def __init__(
self,
c_m: int,
c_z: int,
c_hidden_opm: int,
c_hidden_mul: int,
c_hidden_pair_att: int,
no_heads_msa: int,
no_heads_pair: int,
transition_n: int,
inf: float,
eps: float,
_is_extra_msa_stack: bool = False,
):
super(E2EformerBlockCore, self).__init__()
self.msa_transition = MSATransition(
c_m=c_m,
n=transition_n,
)
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MSATransition(nn.Module):
"""
Feed-forward network applied to MSA activations after attention.
Implements Algorithm 9
"""
def __init__(self, c_m, n):
"""
Args:
c_m:
MSA channel dimension
n:
Factor multiplied to c_m to obtain the hidden channel
dimension
"""
super(MSATransition, self).__init__()
self.c_m = c_m
self.n = n
self.layer_norm = LayerNorm(self.c_m)
self.linear_1 = Linear(self.c_m, self.n * self.c_m)
self.relu = nn.ReLU()
self.linear_2 = Linear(self.n * self.c_m, self.c_m)
def _transition(self, m, mask):
m = self.layer_norm(m)
m = self.linear_1(m)
m = self.relu(m)
m = self.linear_2(m) * mask
return m
@torch.jit.ignore
def _chunk(self,
m: torch.Tensor,
mask: torch.Tensor,
chunk_size: int,
) -> torch.Tensor:
return chunk_layer(
self._transition,
{"m": m, "mask": mask},
chunk_size=chunk_size,
no_batch_dims=len(m.shape[:-2]),
)
def forward(
self,
m: torch.Tensor,
mask: Optional[torch.Tensor] = None,
chunk_size: Optional[int] = None,
) -> torch.Tensor:
"""
Args:
m:
[*, N_seq, N_res, C_m] MSA activation
mask:
[*, N_seq, N_res, C_m] MSA mask
Returns:
m:
[*, N_seq, N_res, C_m] MSA activation update
"""
if mask is None:
mask = m.new_ones(m.shape[:-1])
mask = mask.unsqueeze(-1)
if chunk_size is not None:
m = self._chunk(m, mask, chunk_size)
else:
m = self._transition(m, mask)
return m
class E2EformerBlockCore(nn.Module):
def __init__(
self,
c_m: int,
c_z: int,
c_hidden_opm: int,
c_hidden_mul: int,
c_hidden_pair_att: int,
no_heads_msa: int,
no_heads_pair: int,
transition_n: int,
inf: float,
eps: float,
_is_extra_msa_stack: bool = False,
):
super(E2EformerBlockCore, self).__init__()
self.msa_transition = MSATransition(
c_m=c_m,
n=transition_n,
)
| self.outer_product_mean = OuterProductMean( | 4 | 2023-11-01 10:29:08+00:00 | 8k |
dcermak/rpm-spec-language-server | tests/conftest.py | [
{
"identifier": "RpmSpecLanguageServer",
"path": "rpm_spec_language_server/server.py",
"snippet": "class RpmSpecLanguageServer(LanguageServer):\n _CONDITION_KEYWORDS = [\n # from https://github.com/rpm-software-management/rpm/blob/7d3d9041af2d75c4709cf7a721daf5d1787cce14/build/rpmbuild_internal.h#L58\n \"%endif\",\n \"%else\",\n \"%if\",\n \"%ifarch\",\n \"%ifnarch\",\n \"%ifos\",\n \"%ifnos\",\n \"%include\",\n \"%elifarch\",\n \"%elifos\",\n \"%elif\",\n ]\n\n def __init__(self) -> None:\n super().__init__(name := \"rpm_spec_language_server\", metadata.version(name))\n self.spec_files: dict[str, SpecSections] = {}\n self.macros = Macros.dump()\n self.auto_complete_data = create_autocompletion_documentation_from_spec_md(\n spec_md_from_rpm_db() or \"\"\n )\n\n def macro_and_scriptlet_completions(\n self, with_percent: bool\n ) -> list[CompletionItem]:\n return (\n [\n CompletionItem(\n label=key if with_percent else key[1:], documentation=value\n )\n for key, value in self.auto_complete_data.scriptlets.items()\n ]\n + [\n CompletionItem(label=keyword if with_percent else keyword[1:])\n for keyword in self._CONDITION_KEYWORDS\n ]\n + [\n CompletionItem(label=f\"%{macro.name}\" if with_percent else macro.name)\n for macro in self.macros\n ]\n )\n\n @property\n def trigger_characters(self) -> list[str]:\n return list(\n set(\n preamble_element[0]\n for preamble_element in {\n **self.auto_complete_data.preamble,\n **self.auto_complete_data.dependencies,\n }\n ).union({\"%\"})\n )\n\n def spec_sections_from_cache_or_file(\n self, text_document: TextDocumentIdentifier | TextDocumentItem\n ) -> SpecSections | None:\n if sections := self.spec_files.get((uri := text_document.uri), None):\n return sections\n\n if not (spec := spec_from_text_document(text_document)):\n return None\n\n self.spec_files[uri] = (sect := SpecSections.parse(spec))\n return sect"
},
{
"identifier": "create_rpm_lang_server",
"path": "rpm_spec_language_server/server.py",
"snippet": "def create_rpm_lang_server() -> RpmSpecLanguageServer:\n rpm_spec_server = RpmSpecLanguageServer()\n\n def did_open_or_save(\n server: RpmSpecLanguageServer,\n param: DidOpenTextDocumentParams | DidSaveTextDocumentParams,\n ) -> None:\n LOGGER.debug(\"open or save event\")\n if not (spec := spec_from_text_document(param.text_document)):\n return None\n\n LOGGER.debug(\"Saving parsed spec for %s\", param.text_document.uri)\n server.spec_files[param.text_document.uri] = SpecSections.parse(spec)\n\n rpm_spec_server.feature(TEXT_DOCUMENT_DID_OPEN)(did_open_or_save)\n rpm_spec_server.feature(TEXT_DOCUMENT_DID_SAVE)(did_open_or_save)\n\n @rpm_spec_server.feature(TEXT_DOCUMENT_DID_CLOSE)\n def did_close(\n server: RpmSpecLanguageServer, param: DidCloseTextDocumentParams\n ) -> None:\n if param.text_document.uri in server.spec_files:\n del server.spec_files[param.text_document.uri]\n\n @rpm_spec_server.feature(TEXT_DOCUMENT_DID_CHANGE)\n def did_change(\n server: RpmSpecLanguageServer, param: DidChangeTextDocumentParams\n ) -> None:\n LOGGER.debug(\"Text document %s changed\", (uri := param.text_document.uri))\n\n if spec := spec_from_text(\n server.workspace.text_documents[uri].source, os.path.basename(uri)\n ):\n server.spec_files[uri] = SpecSections.parse(spec)\n LOGGER.debug(\"Updated the spec for %s\", uri)\n\n @rpm_spec_server.feature(\n TEXT_DOCUMENT_COMPLETION,\n CompletionOptions(trigger_characters=rpm_spec_server.trigger_characters),\n )\n def complete_macro_name(\n server: RpmSpecLanguageServer, params: CompletionParams\n ) -> CompletionList:\n if not (\n spec_sections := server.spec_sections_from_cache_or_file(\n text_document=params.text_document\n )\n ):\n return CompletionList(is_incomplete=False, items=[])\n\n trigger_char = (\n None if params.context is None else params.context.trigger_character\n )\n\n # we are *not* in the preamble or a %package foobar section\n # only complete macros\n if not (\n cur_sect := spec_sections.section_under_cursor(params.position)\n ) or not cur_sect.name.startswith(\"package\"):\n # also if we have no completion context, just send macros and if we\n # have it, only send them if this was triggered by a %\n LOGGER.debug(\n \"Sending completions for outside the package section with trigger_character %s\",\n trigger_char,\n )\n if (trigger_char and trigger_char == \"%\") or trigger_char is None:\n return CompletionList(\n is_incomplete=False,\n items=server.macro_and_scriptlet_completions(\n with_percent=trigger_char is None\n ),\n )\n return CompletionList(is_incomplete=False, items=[])\n\n # we are in a package section => we can return preamble and dependency\n # tags as completion items too\n\n # return everything if we have no trigger character\n if trigger_char is None:\n LOGGER.debug(\n \"Sending completions for %package/preamble without a trigger_character\"\n )\n return CompletionList(\n is_incomplete=False,\n items=[\n CompletionItem(label=key, documentation=value)\n for key, value in {\n **server.auto_complete_data.dependencies,\n **server.auto_complete_data.preamble,\n }.items()\n ]\n + server.macro_and_scriptlet_completions(with_percent=True),\n )\n\n if trigger_char == \"%\":\n LOGGER.debug(\"Sending completions for %package/premable triggered by %\")\n return CompletionList(\n is_incomplete=False,\n items=server.macro_and_scriptlet_completions(with_percent=False),\n )\n else:\n LOGGER.debug(\n \"Sending completions for %package/premable triggered by %s\",\n trigger_char,\n )\n return CompletionList(\n is_incomplete=False,\n 
items=[\n CompletionItem(label=key, documentation=value)\n for key, value in {\n **server.auto_complete_data.dependencies,\n **server.auto_complete_data.preamble,\n }.items()\n if key.startswith(trigger_char)\n ],\n )\n\n @rpm_spec_server.feature(TEXT_DOCUMENT_DOCUMENT_SYMBOL)\n def spec_symbols(\n server: RpmSpecLanguageServer,\n param: DocumentSymbolParams,\n ) -> list[DocumentSymbol] | list[SymbolInformation] | None:\n if not (\n spec_sections := server.spec_sections_from_cache_or_file(\n text_document=param.text_document\n )\n ):\n return None\n\n return spec_sections.to_document_symbols()\n\n @rpm_spec_server.feature(TEXT_DOCUMENT_DEFINITION)\n def find_macro_definition(\n server: RpmSpecLanguageServer,\n param: DefinitionParams,\n ) -> Location | list[Location] | list[LocationLink] | None:\n # get the in memory spec if available\n if not (\n spec_sections := server.spec_sections_from_cache_or_file(\n param.text_document\n )\n ):\n return None\n\n macro_under_cursor = get_macro_under_cursor(\n spec=spec_sections.spec, position=param.position, macros_dump=server.macros\n )\n\n if not macro_under_cursor:\n return None\n\n macro_name = (\n macro_under_cursor\n if isinstance(macro_under_cursor, str)\n else macro_under_cursor.name\n )\n macro_level = (\n MacroLevel.SPEC\n if isinstance(macro_under_cursor, str)\n else macro_under_cursor.level\n )\n\n def find_macro_define_in_spec(file_contents: str) -> list[re.Match[str]]:\n \"\"\"Searches for the definition of the macro ``macro_under_cursor``\n as it would appear in a spec file, i.e.: ``%global macro`` or\n ``%define macro``.\n\n \"\"\"\n regex = re.compile(\n rf\"^([\\t \\f]*)(%(?:global|define))([\\t \\f]+)({macro_name})\",\n re.MULTILINE,\n )\n return list(regex.finditer(file_contents))\n\n def find_macro_in_macro_file(file_contents: str) -> list[re.Match[str]]:\n \"\"\"Searches for the definition of the macro ``macro_under_cursor``\n as it would appear in a rpm macros file, i.e.: ``%macro …``.\n\n \"\"\"\n regex = re.compile(\n rf\"^([\\t \\f]*)(%{macro_name})([\\t \\f]+)(\\S+)\", re.MULTILINE\n )\n return list(regex.finditer(file_contents))\n\n def find_preamble_definition_in_spec(\n file_contents: str,\n ) -> list[re.Match[str]]:\n regex = re.compile(\n rf\"^([\\t \\f]*)({macro_name}):([\\t \\f]+)(\\S*)\",\n re.MULTILINE | re.IGNORECASE,\n )\n if (m := regex.search(file_contents)) is None:\n return []\n return [m]\n\n define_matches, file_uri = [], None\n\n # macro is defined in the spec file\n if macro_level == MacroLevel.GLOBAL:\n if not (\n define_matches := find_macro_define_in_spec(str(spec_sections.spec))\n ):\n return None\n\n file_uri = param.text_document.uri\n\n # macro is something like %version, %release, etc.\n elif macro_level == MacroLevel.SPEC:\n if not (\n define_matches := find_preamble_definition_in_spec(\n str(spec_sections.spec)\n )\n ):\n return None\n file_uri = param.text_document.uri\n\n # the macro comes from a macro file\n #\n # We have now two options, either it is provided by a rpm package. Then\n # there will be a package providing `rpm_macro($NAME)`. 
If that is the\n # case, then we query the rpm db for all files provided by all packages\n # providing this symbol and look for the macro definition in all files\n # that are in %_rpmmacrodir (nothing else will be loaded by rpm)\n #\n # If this yields nothing, then the macro most likely comes from the\n # builtin macros file of rpm (_should_ be in %_rpmconfigdir/macros) so\n # we retry the search in that file.\n elif macro_level == MacroLevel.MACROFILES:\n MACROS_DIR = rpm.expandMacro(\"%_rpmmacrodir\")\n ts = rpm.TransactionSet()\n\n # search in packages\n for pkg in ts.dbMatch(\"provides\", f\"rpm_macro({macro_name})\"):\n for f in rpm.files(pkg):\n if f.name.startswith(MACROS_DIR):\n with open(f.name) as macro_file_f:\n if define_matches := find_macro_in_macro_file(\n macro_file_f.read(-1)\n ):\n file_uri = f\"file://{f.name}\"\n break\n\n # we didn't find a match\n # => the macro can be from %_rpmconfigdir/macros (no provides generated for it)\n if not define_matches:\n fname = rpm.expandMacro(\"%_rpmconfigdir\") + \"/macros\"\n with open(fname) as macro_file_f:\n if define_matches := find_macro_in_macro_file(\n macro_file_f.read(-1)\n ):\n file_uri = f\"file://{fname}\"\n\n if define_matches and file_uri:\n return [\n Location(\n uri=file_uri,\n range=Range(\n start := position_from_match(define_match),\n Position(\n line=start.line,\n character=(\n start.character\n + define_match.end()\n - define_match.start()\n ),\n ),\n ),\n )\n for define_match in define_matches\n ]\n\n return None\n\n @rpm_spec_server.feature(TEXT_DOCUMENT_HOVER)\n def expand_macro(\n server: RpmSpecLanguageServer, params: HoverParams\n ) -> Hover | None:\n if spec_sections := server.spec_files.get(params.text_document.uri, None):\n macro = get_macro_under_cursor(\n spec=spec_sections.spec,\n position=params.position,\n macros_dump=server.macros,\n )\n else:\n macro = get_macro_under_cursor(\n text_document=params.text_document,\n position=params.position,\n macros_dump=server.macros,\n )\n\n # not a macro or an unknown macro => cannot show a meaningful hover\n if not macro or isinstance(macro, str):\n return None\n\n if macro.level == MacroLevel.BUILTIN:\n return Hover(contents=\"builtin\")\n\n try:\n expanded_macro = Macros.expand(macro.body)\n formatted_macro = f\"```bash\\n{expanded_macro}\\n```\"\n contents = MarkupContent(kind=MarkupKind.Markdown, value=formatted_macro)\n return Hover(contents)\n except RPMException:\n return Hover(contents=macro.body)\n\n return rpm_spec_server"
}
] | import asyncio
import os
import threading
import pytest
from typing import Generator
from lsprotocol.types import (
EXIT,
INITIALIZE,
SHUTDOWN,
ClientCapabilities,
InitializeParams,
)
from pygls.server import LanguageServer
from typeguard import install_import_hook
from rpm_spec_language_server.server import (
RpmSpecLanguageServer,
create_rpm_lang_server,
) | 3,783 |
install_import_hook("rpm_spec_language_server")
class ClientServer:
    # shamelessly stolen from
    # https://github.com/openlawlibrary/pygls/blob/8f601029dcf3c7c91be7bf2d86a841a1598ce1f0/tests/ls_setup.py#L109
    def __init__(self):
        # Client to Server pipe
        csr, csw = os.pipe()
        # Server to client pipe
        scr, scw = os.pipe()

        # Setup Server
        self.server = create_rpm_lang_server()
        self.server_thread = threading.Thread(
            name="Server Thread",
            target=self.server.start_io,
            args=(os.fdopen(csr, "rb"), os.fdopen(scw, "wb")),
        )
        self.server_thread.daemon = True

        # Setup client
        self.client = LanguageServer("client", "v1", asyncio.new_event_loop())
        self.client_thread = threading.Thread(
            name="Client Thread",
            target=self.client.start_io,
            args=(os.fdopen(scr, "rb"), os.fdopen(csw, "wb")),
        )
        self.client_thread.daemon = True

    @classmethod
    def decorate(cls):
        return pytest.mark.parametrize("client_server", [cls], indirect=True)

    def start(self) -> None:
        self.server_thread.start()
        self.server.thread_id = self.server_thread.ident
        self.client_thread.start()
        self.initialize()

    def stop(self) -> None:
        shutdown_response = self.client.lsp.send_request(SHUTDOWN).result()
        assert shutdown_response is None
        self.client.lsp.notify(EXIT)
        self.server_thread.join()
        self.client._stop_event.set()
        try:
            self.client.loop._signal_handlers.clear() # HACK ?
        except AttributeError:
            pass
        self.client_thread.join()

    # @retry_stalled_init_fix_hack()
    def initialize(self) -> None:
        timeout = None if "DISABLE_TIMEOUT" in os.environ else 1
        response = self.client.lsp.send_request(
            INITIALIZE,
            InitializeParams(
                process_id=12345, root_uri="file://", capabilities=ClientCapabilities()
            ),
        ).result(timeout=timeout)
        assert response.capabilities is not None

    def __iter__(self) -> Generator[LanguageServer, None, None]:
        yield self.client
        yield self.server
|
install_import_hook("rpm_spec_language_server")
class ClientServer:
# shamelessly stolen from
# https://github.com/openlawlibrary/pygls/blob/8f601029dcf3c7c91be7bf2d86a841a1598ce1f0/tests/ls_setup.py#L109
def __init__(self):
# Client to Server pipe
csr, csw = os.pipe()
# Server to client pipe
scr, scw = os.pipe()
# Setup Server
self.server = create_rpm_lang_server()
self.server_thread = threading.Thread(
name="Server Thread",
target=self.server.start_io,
args=(os.fdopen(csr, "rb"), os.fdopen(scw, "wb")),
)
self.server_thread.daemon = True
# Setup client
self.client = LanguageServer("client", "v1", asyncio.new_event_loop())
self.client_thread = threading.Thread(
name="Client Thread",
target=self.client.start_io,
args=(os.fdopen(scr, "rb"), os.fdopen(csw, "wb")),
)
self.client_thread.daemon = True
@classmethod
def decorate(cls):
return pytest.mark.parametrize("client_server", [cls], indirect=True)
def start(self) -> None:
self.server_thread.start()
self.server.thread_id = self.server_thread.ident
self.client_thread.start()
self.initialize()
def stop(self) -> None:
shutdown_response = self.client.lsp.send_request(SHUTDOWN).result()
assert shutdown_response is None
self.client.lsp.notify(EXIT)
self.server_thread.join()
self.client._stop_event.set()
try:
self.client.loop._signal_handlers.clear() # HACK ?
except AttributeError:
pass
self.client_thread.join()
# @retry_stalled_init_fix_hack()
def initialize(self) -> None:
timeout = None if "DISABLE_TIMEOUT" in os.environ else 1
response = self.client.lsp.send_request(
INITIALIZE,
InitializeParams(
process_id=12345, root_uri="file://", capabilities=ClientCapabilities()
),
).result(timeout=timeout)
assert response.capabilities is not None
def __iter__(self) -> Generator[LanguageServer, None, None]:
yield self.client
yield self.server
| CLIENT_SERVER_T = Generator[tuple[LanguageServer, RpmSpecLanguageServer], None, None] | 0 | 2023-11-02 10:52:17+00:00 | 8k |
ziqi-zhang/TAOISM | python/layers/batch_norm_2d.py | [
{
"identifier": "SecretActivationLayer",
"path": "python/layers/activation.py",
"snippet": "class SecretActivationLayer(SecretNonlinearLayer):\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False, merge_own_tensors=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.Shapefortranspose = None\n self.link_prev = link_prev\n self.link_next = link_next\n self.manual_register_prev = manually_register_prev\n self.manual_register_next = manually_register_next\n self.merge_own_tensors = merge_own_tensors\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = self.InputShape\n self.HandleShape = self.InputShape\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n\n def link_tensors(self):\n if self.merge_own_tensors:\n self.manually_link_owned_two_tensors(\"input\", \"output\")\n super().link_tensors()\n\n\n def get_output_shape(self):\n return self.OutputShape\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n if len(self.InputShape) == 4:\n # self.Shapefortranspose = [int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/262144+1/2))), 262144, 1, 1]\n self.Shapefortranspose = [int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/602112+1/2))), 602112, 1, 1]\n \n else:\n self.Shapefortranspose = self.InputShape\n NeededTensorNames = [(\"output\", self.OutputShape, None),\n (\"handle\", self.HandleShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n # (\"DerOutput\", self.OutputShape, None),\n (\"inputtrans\", self.Shapefortranspose, None),\n (\"outputtrans\", self.Shapefortranspose, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.Enclave:\n # with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n # self.transfer_enclave_to_cpu(\"input\")\n # if torch.sum(self.get_cpu(\"input\").abs()) == 0:\n # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # self.transfer_cpu_to_enclave(\"input\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} ForwardFunc\", verbose_level=VerboseLevel.LAYER):\n self.ForwardFunc(\"input\", \"output\")\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.CPU and torch.sum(self.get_cpu(\"input\").abs()) == 0:\n raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.GPU and torch.sum(self.get_gpu(\"input\").abs()) == 0:\n raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n else:\n raise RuntimeError\n\n def backward(self):\n with 
NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n self.backward_tensor_transfer()\n if self.is_enclave_mode:\n self.BackwardFunc(\"output\", \"DerOutput\", \"DerInput\")\n else:\n self.set_cpu(\"DerInput\", self.get_cpu(\"output\").grad_fn(self.get_cpu(\"DerOutput\")))"
},
{
"identifier": "LearnableParamTuple",
"path": "python/sgx_net.py",
"snippet": "def conv2d_op(w, x, is_div=True):\n def base_conv2d(sub_x, sub_w):\n def sum_of_div(best_shape):\ndef conv2d_input_grad_op(w, dy):\ndef conv2d_weight_grad_op(dy, x, is_div=True):\n def base_conv2d_weight_grad_op(sub_dy, sub_x):\n def sum_of_div(best_shape):\ndef matmul_op(w, x):\ndef matmul_input_grad_op(w, dy):\ndef matmul_weight_grad_op(dy, x):\ndef set_tensor_name_maybe_quantized(name, quantized):\ndef secret_op_class_factory(sid, target_op_name):\n def __init__(self, name):\n def target_op(self, a, b):\n def __init__(self, sid, nn_name):\n def set_layers(self, layers):\n def execute_for_each_layer(self, func, reverse=False):\n def classifier_output(self):\n def get_loss(self):\n def forward_with_time(self):\n def run_forward(layer):\n def forward(self):\n def run_forward(layer):\n def backward(self):\n def run_backward(layer):\n def plain_forward(self):\n def plain_backward(self):\n def show_plain_error(self):\n def __init__(self, sid):\n def set_layers(self, layers):\n def generate_tensor_name_list(self, force=False):\n def update_params(self, test_with_ideal=False):\n def update_params_in_layer(self, layer, test_with_ideal=False):\n def ideal_update_params_with_name(self, layer, der_name, param_name, shape):\ndef warming_up_cuda():\ndef init_communicate(rank, master_address, master_port, backend='gloo'):\nclass SecretNeuralNetwork(TensorLoader):\nclass SgdOptimizer(TensorLoader):"
},
{
"identifier": "TensorLoader",
"path": "python/tensor_loader.py",
"snippet": "class TensorLoader(EnclaveInterface):\n def __init__(self):\n super().__init__()\n self.sid = -1\n self.tensor_name_list = []\n self.encryption_tensor_name_list = {}\n self.RandomVarName = None\n self.ShareVarName = None\n self.ShareTuple = None\n\n def init(self, start_enclave=True):\n if start_enclave:\n print(\"Initializing sid: %d\" % self.sid)\n self.init_enclave()\n \n self.generate_tensor_name_list()\n # if hasattr(self, \"LayerName\") and self.LayerName == \"Layer1.0.main.relu2\":\n # st()\n\n self.init_enclave_tensors()\n self.init_cpu_tensor()\n self.init_encryption_tensor()\n \n\n def generate_tensor_name_list(self, force=False):\n return\n\n def link_tensors(self):\n pass\n\n def init_enclave_tensors(self):\n self.generate_tensor_name_list()\n for TensorName, shape, SeedList in self.tensor_name_list:\n if shape is None:\n raise ValueError(\"The shape is None. Please setup the shape before init_enclave_tensor\")\n # print(f\"TensorLoader init {TensorName}, {shape}\")\n self.init_enclave_tensor(TensorName, shape)\n if SeedList is None:\n continue\n for seed in SeedList:\n self.set_seed(TensorName, seed)\n\n def set_cpu(self, name, t):\n # print(\"---\", name, self.get_tag(name))\n GlobalTensor.set_cpu(self.get_tag(name), t)\n\n def set_gpu(self, name, t):\n GlobalTensor.set_gpu(self.get_tag(name), t)\n\n def set_encryption(self, name, t):\n GlobalTensor.set_encryption(self.get_tag(name), t)\n\n def get_cpu(self, name):\n return GlobalTensor.get_cpu(self.get_tag(name))\n\n def get_gpu(self, name):\n return GlobalTensor.get_gpu(self.get_tag(name))\n\n def get_encryption(self, name):\n return GlobalTensor.get_encryption(self.get_tag(name))\n\n def generate_cpu_tensor(self, name, shape):\n self.set_cpu(name, torch.zeros(shape).type(SecretConfig.dtypeForCpuOp))\n # self.CpuTensors[name] = torch.zeros(shape).type(SecretConfig.dtypeForCpuOp)\n\n def transfer_cpu_to_gpu(self, name):\n self.set_gpu(name, self.get_cpu(name).cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm))\n # self.GpuTensors[name] = self.CpuTensors[name].cuda(non_blocking=True).type(SecretConfig.dtypeForCudaMm)\n\n def transfer_gpu_to_cpu(self, name):\n cpu_tensor = self.get_cpu(name)\n gpu_tensor = self.get_gpu(name)\n cpu_tensor.copy_(gpu_tensor.type(SecretConfig.dtypeForCpuOp))\n\n def transfer_enclave_to_cpu(self, name):\n self.from_enclave(name, self.get_cpu(name))\n\n def transfer_cpu_to_enclave(self, name):\n self.set_tensor(name, self.get_cpu(name))\n\n def init_cpu_tensor(self):\n self.generate_tensor_name_list()\n\n for TensorName, shape, _ in self.tensor_name_list:\n self.generate_cpu_tensor(TensorName, shape)\n\n def init_encryption_tensor(self):\n self.generate_tensor_name_list()\n\n for name, shape in self.encryption_tensor_name_list:\n GlobalTensor.init_encrypted_tensor(self.get_tag(name), shape)\n # self.EncrtyptedTensors[name] = self.CreateEncryptTorch(shape)\n\n def set_tensor_cpu_enclave(self, name, tensor):\n # GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor)\n self.set_cpu(name, tensor)\n self.set_tensor(name, tensor)\n # print(\"Set cpu enclave: \", tensor[0,:10])\n\n def set_tensor_cpu_gpu_enclave(self, name, tensor):\n # GlobalTensor.SetNamedTensor(self.GetTag(tag), tensor)\n self.set_cpu(name, tensor)\n self.set_tensor(name, tensor)\n self.set_gpu(name, tensor)\n # print(\"Set cpu enclave: \", tensor[0,:10])\n\n def from_enclave(self, name, tensor):\n self.get_tensor(name, tensor)\n\n # def generate_enclave_tensor(self, name):\n # if name in self.RandomVarName:\n # return 
self.async_get_random(name, self.get_cpu(name))\n # elif name in self.ShareVarName:\n # original, seed = self.ShareTuple[name]\n # return self.async_get_share(original, self.get_cpu(name), seed)\n # else:\n # raise Exception(\"Doesnt how to generate this tensor\")"
},
{
"identifier": "NamedTimerInstance",
"path": "python/utils/timer_utils.py",
"snippet": "class NamedTimerInstance(object):\n def __init__(self, name, verbose_level=VerboseLevel.EVERY):\n self.name = name\n self.verbose_level = verbose_level\n\n def __enter__(self):\n return NamedTimer.start(self.name, verbose_level=self.verbose_level)\n ...\n\n def __exit__(self, *args):\n NamedTimer.end(self.name)\n ..."
},
{
"identifier": "VerboseLevel",
"path": "python/utils/timer_utils.py",
"snippet": "class VerboseLevel(IntEnum):\n EVERY = 1\n LAYER = 2\n RUN = 3\n EPOCH = 4"
},
{
"identifier": "compare_expected_actual",
"path": "python/utils/torch_utils.py",
"snippet": "def compare_expected_actual(expected, actual, show_where_err=False, get_relative=False, verbose=False, show_values=False):\n def purify(x):\n # return torch.tensor(x)\n res = x\n # if not (isinstance(x, torch.Tensor) or isinstance(x, torch.Variable)):\n if not (isinstance(x, torch.Tensor) ):\n res = torch.tensor(x)\n # return x.detach().numpy()\n return res.type(torch.float).to(\"cpu\")\n expected = purify(expected)\n actual = purify(actual)\n\n if show_values:\n print(\"expected:\", expected[0, 0])\n print(\"actual:\", actual[0, 0])\n\n avg_abs_diff = torch.mean(torch.abs(expected - actual)).item()\n res = avg_abs_diff\n\n if show_where_err:\n show_indices = torch.abs(expected - actual) / torch.abs(expected) > 0.5\n # show_indices = (expected != actual)\n print(\"error indices: \", np.where(show_indices.cpu()))\n print(\"expected values:\", expected[show_indices])\n print(\"difference:\", (expected - actual)[show_indices])\n\n if get_relative:\n tmp_expected, tmp_actual = expected[expected != 0], actual[expected != 0]\n relative_diff = torch.abs(tmp_expected - tmp_actual) / torch.abs(tmp_expected)\n relative_avg_diff = torch.mean(torch.abs(tmp_actual - tmp_expected)) / torch.mean(torch.abs(tmp_expected))\n Error = namedtuple(\"Error\", (\"AvgAbsDiff\", \"RelAvgDiff\", \"AvgRelDiff\", \"StdRelDiff\"))\n res = Error(avg_abs_diff, relative_avg_diff.item(), torch.mean(relative_diff).item(), torch.std(relative_diff).item())\n\n if verbose:\n print(res)\n\n return res"
},
{
"identifier": "ExecutionModeOptions",
"path": "python/utils/basic_utils.py",
"snippet": "class ExecutionModeOptions(Enum):\n Enclave = 1\n CPU = 2\n GPU = 3"
},
{
"identifier": "SecretConfig",
"path": "python/global_config.py",
"snippet": "class SecretConfig(object):\n worldSize = 3\n PrimeLimit = (1 << 21) - 9\n dtypeForCpuMod = torch.float32\n dtypeForCudaMm = torch.float64\n dtypeForCpuOp = torch.float32\n dtypeForSave = torch.float32\n stateless_logfile = \"stateless.log\"\n stateless_logger_name = \"stateless_logger\"\n is_comptue_gpu = True"
}
] | import numpy as np
import torch
from pdb import set_trace as st
from python.layers.activation import SecretActivationLayer
from python.sgx_net import LearnableParamTuple
from python.tensor_loader import TensorLoader
from python.utils.timer_utils import NamedTimerInstance, VerboseLevel
from python.utils.torch_utils import compare_expected_actual
from python.utils.basic_utils import ExecutionModeOptions
from python.global_config import SecretConfig | 6380 | self.set_gpu("weight", list(self.ForwardFunc.parameters())[0].data)
self.set_gpu("bias", list(self.ForwardFunc.parameters())[1].data)
self.set_gpu("RunMean", self.ForwardFunc.running_mean.data)
self.set_gpu("RunVar", self.ForwardFunc.running_var.data)
self.PlainFunc.eval()
self.ForwardFunc.cuda().eval()
# def inject_params(self, params):
# if self.sid == -2:
# raise ValueError("S2 has no learnable parameters for injection")
# self.get_cpu("weight").copy_(params.weight.data)
# self.get_cpu("bias").copy_(params.bias.data)
# self.get_cpu("RunMean").copy_(params.running_mean.data)
# # inject sqrt(running_var) instead of running_var for precision
# self.get_cpu("RunVar").copy_(params.running_var.data)
# if self.is_enclave_mode:
# self.transfer_cpu_to_enclave("weight")
# self.transfer_cpu_to_enclave("bias")
# self.transfer_cpu_to_enclave("RunMean")
# self.transfer_cpu_to_enclave("RunVar")
def inject_params(self, params):
if self.sid == -2:
raise ValueError("S2 has no learnable parameters for injection")
if self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.Enclave]:
self.get_cpu("weight").copy_(params.weight.data)
self.get_cpu("bias").copy_(params.bias.data)
self.get_cpu("RunMean").copy_(params.running_mean.data)
self.get_cpu("RunVar").copy_(params.running_var.data)
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
elif self.EnclaveMode is ExecutionModeOptions.GPU:
self.get_gpu("weight").copy_(params.weight.data)
self.get_gpu("bias").copy_(params.bias.data)
self.get_gpu("RunMean").copy_(params.running_mean.data)
self.get_gpu("RunVar").copy_(params.running_var.data)
def reset_plain_bn(self):
# module = torch.BatchNorm2d()
self.get_cpu("weight").copy_(torch.ones(self.InputShape[1]))
self.get_cpu("bias").copy_(torch.zeros(self.InputShape[1]))
self.get_cpu("RunMean").copy_(torch.zeros(self.InputShape[1]))
self.get_cpu("RunVar").copy_(torch.ones(self.InputShape[1]))
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:
raise NotImplementedError
if self.sid == -2:
raise ValueError("S2 has no learnable parameters for injection")
self.make_sure_cpu_is_latest("weight")
self.make_sure_cpu_is_latest("bias")
plain_layer.weight.data.copy_(self.get_cpu("weight"))
plain_layer.bias.data.copy_(self.get_cpu("bias"))
plain_layer.running_mean.data.copy_(self.get_cpu("RunMean"))
plain_layer.running_var.data.copy_(self.get_cpu("RunVar"))
def generate_tensor_name_list(self, force=False):
if not force and self.tensor_name_list:
return
if self.sid == 2:
self.tensor_name_list = {}
return
if self.EnclaveMode is ExecutionModeOptions.Enclave:
NeededTensorNames = [
("input", self.InputShape, None),
# ("DerInput", self.InputShape, None),
("output", self.OutputShape, None),
# ("DerOutput", self.OutputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
("RunMean", self.WeightShape, None),
("CurMean", self.WeightShape, None),
("RunVar", self.WeightShape, None),
("CurVar", self.WeightShape, None),
("mu", self.InputShape, None),
]
else:
NeededTensorNames = [
("output", self.OutputShape, None),
# ("DerInput", self.InputShape, None),
("input", self.InputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
# ("DerOutput", self.OutputShape, None)
]
self.tensor_name_list = NeededTensorNames
# def forward(self):
# if self.sid == 2:
# return
# with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER):
# if self.is_enclave_mode:
# self.forward_tensor_transfer()
# self.batchnorm_forward(self.LayerName, int(False))
# else:
# self.forward_tensor_transfer()
# self.requires_grad_on_cpu("input")
# self.ForwardFunc.bias.data.copy_(self.get_cpu("bias"))
# self.ForwardFunc.weight.data.copy_(self.get_cpu("weight"))
# self.ForwardFunc.running_mean.data.copy_(self.get_cpu("RunMean"))
# # running_var of PlainFunc is ^2 of that in the enclave
# enclave_running_var = self.get_cpu("RunVar")
# self.ForwardFunc.running_var.data.copy_(enclave_running_var)
# self.set_cpu("output", self.ForwardFunc(self.get_cpu("input")))
def forward(self):
|
class SecretBatchNorm2dLayer(SecretActivationLayer):
# https://pytorch.org/docs/stable/nn.html#batchnorm2d
BatchSize = None
NumChannel = None
ImgH = None
ImgW = None
WeightShape = None
def __init__(
self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,
manually_register_prev=False, manually_register_next=False, merge_own_tensors=False
):
super().__init__(
sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next, merge_own_tensors
)
self.ForwardFuncName = "BatchNorm2d"
self.BackwardFuncName = "DerBatchNorm2d"
self.PlainFunc = torch.nn.BatchNorm2d
self.IsAffine = True
self.momentum = 0.1
self.IsCumulative = (self.momentum is None)
self.epsilon = 1e-5
if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:
self.ForwardFunc = torch.nn.BatchNorm2d
# if self.is_enclave_mode:
# self.StoreInEnclave = True
# else:
# self.ForwardFunc = torch.nn.BatchNorm2d
# self.StoreInEnclave = False
def init_shape(self):
self.InputShape = self.PrevLayer.get_output_shape()
self.OutputShape = self.InputShape
self.BatchSize, self.NumChannel, self.ImgH, self.ImgW = self.InputShape
self.WeightShape = [self.NumChannel]
self.LearnableParamsList = [
LearnableParamTuple(dw_name="DerWeight", w_name="weight", shape=self.WeightShape),
LearnableParamTuple(dw_name="DerBias", w_name="bias", shape=self.WeightShape),
]
# def init(self, start_enclave=True):
# if self.sid == 2:
# return
# TensorLoader.init(self, start_enclave)
# if self.is_enclave_mode:
# self.PlainFunc = self.PlainFunc(self.InputShape[1])
# self.PlainFunc.eval()
# self.get_cpu("weight").data.copy_(self.PlainFunc.weight.data)
# self.get_cpu("bias").data.copy_(self.PlainFunc.bias.data)
# self.get_cpu("RunMean").data.copy_(self.PlainFunc.running_mean.data)
# # inject sqrt(running_var) instead of running_var for precision
# self.get_cpu("RunVar").data.copy_(self.PlainFunc.running_var.data)
# self.transfer_cpu_to_enclave("weight")
# self.transfer_cpu_to_enclave("bias")
# self.transfer_cpu_to_enclave("RunMean")
# self.transfer_cpu_to_enclave("RunVar")
# self.batchnorm_init(
# self.LayerName,
# "input", "output", "weight", "bias",
# "DerInput", "DerOutput", "DerWeight", "DerBias",
# "RunMean", "RunVar", "CurMean", "CurVar",
# "mu",
# self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,
# int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)
# else:
# self.ForwardFunc = self.ForwardFunc(self.InputShape[1])
# self.PlainFunc = self.PlainFunc(self.InputShape[1])
# self.PlainFunc.eval()
# self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)
# self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)
# self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)
# self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)
# self.set_cpu("weight", list(self.ForwardFunc.parameters())[0].data)
# self.set_cpu("bias", list(self.ForwardFunc.parameters())[1].data)
# self.set_cpu("RunMean", self.ForwardFunc.running_mean.data)
# self.set_cpu("RunVar", self.ForwardFunc.running_var.data)
# self.ForwardFunc.eval()
def init(self, start_enclave=True):
# if self.LayerName == "Layer3.10.proxies.0.bn2":
# st()
TensorLoader.init(self, start_enclave)
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.PlainFunc = self.PlainFunc(self.InputShape[1])
self.PlainFunc.eval()
self.get_cpu("weight").data.copy_(self.PlainFunc.weight.data)
self.get_cpu("bias").data.copy_(self.PlainFunc.bias.data)
self.get_cpu("RunMean").data.copy_(self.PlainFunc.running_mean.data)
# inject sqrt(running_var) instead of running_var for precision
self.get_cpu("RunVar").data.copy_(self.PlainFunc.running_var.data)
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
self.batchnorm_init(
self.LayerName,
"input", "output", "weight", "bias",
# "DerInput", "DerOutput", "DerWeight", "DerBias",
"RunMean", "RunVar", "CurMean", "CurVar",
"mu",
self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,
int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)
elif self.EnclaveMode is ExecutionModeOptions.CPU:
self.ForwardFunc = self.ForwardFunc(self.InputShape[1])
self.PlainFunc = self.PlainFunc(self.InputShape[1])
self.PlainFunc.eval()
self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)
self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)
self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)
self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)
self.set_cpu("weight", list(self.ForwardFunc.parameters())[0].data)
self.set_cpu("bias", list(self.ForwardFunc.parameters())[1].data)
self.set_cpu("RunMean", self.ForwardFunc.running_mean.data)
self.set_cpu("RunVar", self.ForwardFunc.running_var.data)
self.ForwardFunc.eval()
elif self.EnclaveMode is ExecutionModeOptions.GPU:
self.ForwardFunc = self.ForwardFunc(self.InputShape[1])
self.PlainFunc = self.PlainFunc(self.InputShape[1])
self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)
self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)
self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)
self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)
self.set_gpu("weight", list(self.ForwardFunc.parameters())[0].data)
self.set_gpu("bias", list(self.ForwardFunc.parameters())[1].data)
self.set_gpu("RunMean", self.ForwardFunc.running_mean.data)
self.set_gpu("RunVar", self.ForwardFunc.running_var.data)
self.PlainFunc.eval()
self.ForwardFunc.cuda().eval()
# def inject_params(self, params):
# if self.sid == -2:
# raise ValueError("S2 has no learnable parameters for injection")
# self.get_cpu("weight").copy_(params.weight.data)
# self.get_cpu("bias").copy_(params.bias.data)
# self.get_cpu("RunMean").copy_(params.running_mean.data)
# # inject sqrt(running_var) instead of running_var for precision
# self.get_cpu("RunVar").copy_(params.running_var.data)
# if self.is_enclave_mode:
# self.transfer_cpu_to_enclave("weight")
# self.transfer_cpu_to_enclave("bias")
# self.transfer_cpu_to_enclave("RunMean")
# self.transfer_cpu_to_enclave("RunVar")
def inject_params(self, params):
if self.sid == -2:
raise ValueError("S2 has no learnable parameters for injection")
if self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.Enclave]:
self.get_cpu("weight").copy_(params.weight.data)
self.get_cpu("bias").copy_(params.bias.data)
self.get_cpu("RunMean").copy_(params.running_mean.data)
self.get_cpu("RunVar").copy_(params.running_var.data)
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
elif self.EnclaveMode is ExecutionModeOptions.GPU:
self.get_gpu("weight").copy_(params.weight.data)
self.get_gpu("bias").copy_(params.bias.data)
self.get_gpu("RunMean").copy_(params.running_mean.data)
self.get_gpu("RunVar").copy_(params.running_var.data)
def reset_plain_bn(self):
# module = torch.BatchNorm2d()
self.get_cpu("weight").copy_(torch.ones(self.InputShape[1]))
self.get_cpu("bias").copy_(torch.zeros(self.InputShape[1]))
self.get_cpu("RunMean").copy_(torch.zeros(self.InputShape[1]))
self.get_cpu("RunVar").copy_(torch.ones(self.InputShape[1]))
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:
raise NotImplementedError
if self.sid == -2:
raise ValueError("S2 has no learnable parameters for injection")
self.make_sure_cpu_is_latest("weight")
self.make_sure_cpu_is_latest("bias")
plain_layer.weight.data.copy_(self.get_cpu("weight"))
plain_layer.bias.data.copy_(self.get_cpu("bias"))
plain_layer.running_mean.data.copy_(self.get_cpu("RunMean"))
plain_layer.running_var.data.copy_(self.get_cpu("RunVar"))
def generate_tensor_name_list(self, force=False):
if not force and self.tensor_name_list:
return
if self.sid == 2:
self.tensor_name_list = {}
return
if self.EnclaveMode is ExecutionModeOptions.Enclave:
NeededTensorNames = [
("input", self.InputShape, None),
# ("DerInput", self.InputShape, None),
("output", self.OutputShape, None),
# ("DerOutput", self.OutputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
("RunMean", self.WeightShape, None),
("CurMean", self.WeightShape, None),
("RunVar", self.WeightShape, None),
("CurVar", self.WeightShape, None),
("mu", self.InputShape, None),
]
else:
NeededTensorNames = [
("output", self.OutputShape, None),
# ("DerInput", self.InputShape, None),
("input", self.InputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
# ("DerOutput", self.OutputShape, None)
]
self.tensor_name_list = NeededTensorNames
# def forward(self):
# if self.sid == 2:
# return
# with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER):
# if self.is_enclave_mode:
# self.forward_tensor_transfer()
# self.batchnorm_forward(self.LayerName, int(False))
# else:
# self.forward_tensor_transfer()
# self.requires_grad_on_cpu("input")
# self.ForwardFunc.bias.data.copy_(self.get_cpu("bias"))
# self.ForwardFunc.weight.data.copy_(self.get_cpu("weight"))
# self.ForwardFunc.running_mean.data.copy_(self.get_cpu("RunMean"))
# # running_var of PlainFunc is ^2 of that in the enclave
# enclave_running_var = self.get_cpu("RunVar")
# self.ForwardFunc.running_var.data.copy_(enclave_running_var)
# self.set_cpu("output", self.ForwardFunc(self.get_cpu("input")))
def forward(self): | with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER): | 4 | 2023-11-01 10:37:37+00:00 | 8k |
NVlabs/M2T2 | demo_rlbench.py | [
{
"identifier": "collate",
"path": "m2t2/dataset.py",
"snippet": "def collate(batch):\n batch = [data for data in batch if not data.get('invalid', False)]\n batch = {key: [data[key] for data in batch] for key in batch[0]}\n if 'task' in batch:\n task = batch.pop('task')\n batch['task_is_pick'] = torch.stack([\n torch.tensor(t == 'pick') for t in task\n ])\n batch['task_is_place'] = torch.stack([\n torch.tensor(t == 'place') for t in task\n ])\n for key in batch:\n if key in [\n 'inputs', 'points', 'seg', 'object_inputs', 'bottom_center',\n 'cam_pose', 'ee_pose', 'placement_masks', 'placement_region',\n 'lang_tokens'\n ]:\n batch[key] = torch.stack(batch[key])\n if key in [\n 'contact_dirs', 'approach_dirs', 'offsets'\n ]:\n batch[key] = torch.cat(batch[key])\n return batch"
},
{
"identifier": "normalize_rgb",
"path": "m2t2/dataset_utils.py",
"snippet": "class NormalizeInverse(transforms.Normalize):\n def __init__(self, mean, std):\n def __call__(self, tensor):\ndef depth_to_xyz(depth, intrinsics):\ndef jitter_gaussian(xyz, std, clip):\ndef sample_points(xyz, num_points):\n Z = depth\n X = (u - cx) * (Z / fx)\n Y = (v - cy) * (Z / fy)"
},
{
"identifier": "create_visualizer",
"path": "m2t2/meshcat_utils.py",
"snippet": "def create_visualizer(clear=True):\n print(\n \"Waiting for meshcat server... have you started a server? Run `meshcat-server` to start a server\"\n )\n vis = meshcat.Visualizer(zmq_url=\"tcp://127.0.0.1:6000\")\n if clear:\n vis.delete()\n return vis"
},
{
"identifier": "visualize_grasp",
"path": "m2t2/meshcat_utils.py",
"snippet": "def visualize_grasp(vis, name, transform, color=[255, 0, 0], **kwargs):\n grasp_vertices = load_grasp_points()\n vis[name].set_object(\n g.Line(\n g.PointsGeometry(grasp_vertices),\n g.MeshBasicMaterial(color=rgb2hex(tuple(color)), **kwargs),\n )\n )\n vis[name].set_transform(transform.astype(np.float64))"
},
{
"identifier": "visualize_pointcloud",
"path": "m2t2/meshcat_utils.py",
"snippet": "def visualize_pointcloud(vis, name, pc, color=None, transform=None, **kwargs):\n \"\"\"\n Args:\n vis: meshcat visualizer object\n name: str\n pc: Nx3 or HxWx3\n color: (optional) same shape as pc[0 - 255] scale or just rgb tuple\n transform: (optional) 4x4 homogeneous transform\n \"\"\"\n if pc.ndim == 3:\n pc = pc.reshape(-1, pc.shape[-1])\n\n if color is not None:\n if isinstance(color, list):\n color = np.array(color)\n color = np.array(color)\n # Resize the color np array if needed.\n if color.ndim == 3:\n color = color.reshape(-1, color.shape[-1])\n if color.ndim == 1:\n color = np.ones_like(pc) * np.array(color)\n\n # Divide it by 255 to make sure the range is between 0 and 1,\n color = color.astype(np.float32) / 255\n else:\n color = np.ones_like(pc)\n\n vis[name].set_object(\n meshcat.geometry.PointCloud(position=pc.T, color=color.T, **kwargs)\n )\n\n if transform is not None:\n vis[name].set_transform(transform)"
},
{
"identifier": "M2T2",
"path": "m2t2/m2t2.py",
"snippet": "class M2T2(nn.Module):\n def __init__(\n self,\n backbone: nn.Module,\n transformer: nn.Module,\n object_encoder: nn.Module = None,\n grasp_mlp: nn.Module = None,\n set_criterion: nn.Module = None,\n grasp_criterion: nn.Module = None,\n place_criterion: nn.Module = None\n ):\n super(M2T2, self).__init__()\n self.backbone = backbone\n self.object_encoder = object_encoder\n self.transformer = transformer\n self.grasp_mlp = grasp_mlp\n self.set_criterion = set_criterion\n self.grasp_criterion = grasp_criterion\n self.place_criterion = place_criterion\n\n @classmethod\n def from_config(cls, cfg):\n args = {}\n args['backbone'] = PointNet2MSG.from_config(cfg.scene_encoder)\n channels = args['backbone'].out_channels\n obj_channels = None\n if cfg.contact_decoder.num_place_queries > 0:\n args['object_encoder'] = PointNet2MSGCls.from_config(\n cfg.object_encoder\n )\n obj_channels = args['object_encoder'].out_channels\n args['place_criterion'] = PlaceCriterion.from_config(\n cfg.place_loss\n )\n args['transformer'] = ContactDecoder.from_config(\n cfg.contact_decoder, channels, obj_channels\n )\n if cfg.contact_decoder.num_grasp_queries > 0:\n args['grasp_mlp'] = ActionDecoder.from_config(\n cfg.action_decoder, args['transformer']\n )\n matcher = HungarianMatcher.from_config(cfg.matcher)\n args['set_criterion'] = SetCriterion.from_config(\n cfg.grasp_loss, matcher\n )\n args['grasp_criterion'] = GraspCriterion.from_config(\n cfg.grasp_loss\n )\n return cls(**args)\n\n def forward(self, data, cfg):\n scene_feat = self.backbone(data['inputs'])\n object_inputs = data['object_inputs']\n object_feat = {}\n if self.object_encoder is not None:\n object_feat = self.object_encoder(object_inputs)\n if 'task_is_place' in data:\n for key, val in object_feat['features'].items():\n object_feat['features'][key] = (\n val * data['task_is_place'].view(\n data['task_is_place'].shape[0], 1, 1\n )\n )\n lang_tokens = data.get('lang_tokens')\n embedding, outputs = self.transformer(\n scene_feat, object_feat, lang_tokens\n )\n\n losses = {}\n if self.place_criterion is not None:\n losses, stats = self.place_criterion(outputs, data)\n outputs[-1].update(stats)\n\n if self.set_criterion is not None:\n set_losses, outputs = self.set_criterion(outputs, data)\n losses.update(set_losses)\n else:\n outputs = outputs[-1]\n\n if self.grasp_mlp is not None:\n mask_features = scene_feat['features'][\n self.transformer.mask_feature\n ]\n obj_embedding = [emb[idx] for emb, idx in zip(\n embedding['grasp'], outputs['matched_idx']\n )]\n confidence = [\n mask.sigmoid() for mask in outputs['matched_grasping_masks']\n ]\n grasp_outputs = self.grasp_mlp(\n data['points'], mask_features, confidence,\n cfg.mask_thresh, obj_embedding, data['grasping_masks']\n )\n outputs.update(grasp_outputs)\n contact_losses = self.grasp_criterion(outputs, data)\n losses.update(contact_losses)\n\n return outputs, losses\n\n def infer(self, data, cfg):\n scene_feat = self.backbone(data['inputs'])\n object_feat = self.object_encoder(data['object_inputs'])\n if 'task_is_place' in data:\n for key in object_feat['features']:\n object_feat['features'][key] = (\n object_feat['features'][key] * data['task_is_place'].view(\n data['task_is_place'].shape[0], 1, 1\n )\n )\n lang_tokens = data.get('lang_tokens')\n embedding, outputs = self.transformer(\n scene_feat, object_feat, lang_tokens\n )\n outputs = outputs[-1]\n\n if 'place' in embedding and embedding['place'].shape[1] > 0:\n cam_pose = None if cfg.world_coord else data['cam_pose']\n 
placement_outputs = infer_placements(\n data['points'], outputs['placement_masks'],\n data['bottom_center'], data['ee_pose'],\n cam_pose, cfg.mask_thresh, cfg.placement_height\n )\n outputs.update(placement_outputs)\n outputs['placement_masks'] = (\n outputs['placement_masks'].sigmoid() > cfg.mask_thresh\n )\n\n if 'grasp' in embedding and embedding['grasp'].shape[1] > 0:\n masks = outputs['grasping_masks'].sigmoid() > cfg.mask_thresh\n mask_features = scene_feat['features'][\n self.transformer.mask_feature\n ]\n if 'objectness' in outputs:\n objectness = outputs['objectness'].sigmoid()\n object_ids = [\n torch.where(\n (score > cfg.object_thresh) & mask.sum(dim=1) > 0\n )[0]\n for score, mask in zip(objectness, masks)\n ]\n outputs['objectness'] = [\n score[idx] for score, idx in zip(objectness, object_ids)\n ]\n confidence = [\n logits.sigmoid()[idx]\n for logits, idx in zip(outputs['grasping_masks'], object_ids)\n ]\n outputs['grasping_masks'] = [\n mask[idx] for mask, idx in zip(masks, object_ids)\n ]\n obj_embedding = [emb[idx] for emb, idx in zip(\n embedding['grasp'], object_ids\n )]\n else:\n obj_embedding = embedding['grasp']\n confidence = [\n logits.sigmoid() for logits in outputs['grasping_masks']\n ]\n grasp_outputs = self.grasp_mlp(\n data['points'], mask_features, confidence,\n cfg.mask_thresh, obj_embedding\n )\n outputs.update(grasp_outputs)\n\n return outputs"
},
{
"identifier": "load_image",
"path": "m2t2/rlbench_utils.py",
"snippet": "def load_image(episode_dir, camera, meta_data, frame_id):\n rgb = np.array(\n Image.open(f\"{episode_dir}/{camera}_rgb/{frame_id}.png\")\n )\n seg = np.array(\n Image.open(f\"{episode_dir}/{camera}_mask/{frame_id}.png\")\n )[..., 0]\n depth = np.array(\n Image.open(f\"{episode_dir}/{camera}_depth/{frame_id}.png\")\n )\n depth = np.sum(depth * [65536, 256, 1], axis=2)\n near = meta_data[f'{camera}_camera_near']\n far = meta_data[f'{camera}_camera_far']\n depth = near + depth / (2**24 - 1) * (far - near)\n pcd = depth_to_xyz(depth, meta_data[f'{camera}_camera_intrinsics'])\n cam_pose = meta_data[f'{camera}_camera_extrinsics'][frame_id]\n pcd = pcd @ cam_pose[:3, :3].T + cam_pose[:3, 3]\n return rgb, pcd, seg"
},
{
"identifier": "within_bound",
"path": "m2t2/rlbench_utils.py",
"snippet": "def within_bound(demo, cameras, bounds):\n pcds, rgbs, masks = [], [], []\n for camera in cameras:\n pcd = demo[f'{camera}_point_cloud']\n rgb = demo[f'{camera}_rgb']\n pcds.append(pcd.reshape(-1, 3))\n rgbs.append(rgb.reshape(-1, 3))\n masks.append(demo[f'{camera}_mask'].reshape(-1))\n pcd = np.concatenate(pcds)\n rgb = np.concatenate(rgbs)\n mask = np.concatenate(masks)\n within = (pcd[:, 0] > bounds[0]) & (pcd[:, 0] < bounds[3]) \\\n & (pcd[:, 1] > bounds[1]) & (pcd[:, 1] < bounds[4]) \\\n & (pcd[:, 2] > bounds[2]) & (pcd[:, 2] < bounds[5])\n return pcd[within], rgb[within], mask[within]"
},
{
"identifier": "gripper_pose_from_rlbench",
"path": "m2t2/rlbench_utils.py",
"snippet": "def gripper_pose_from_rlbench(pose, gripper_depth=0.1034):\n pose = pose @ tra.euler_matrix(0, 0, np.pi / 2)\n pose[:3, 3] -= gripper_depth * pose[:3, 2]\n return pose"
},
{
"identifier": "to_cpu",
"path": "m2t2/train_utils.py",
"snippet": "def to_cpu(dic):\n for key in dic:\n if isinstance(dic[key], torch.Tensor):\n dic[key] = dic[key].detach().cpu()\n elif isinstance(dic[key], list):\n if isinstance(dic[key][0], torch.Tensor):\n for i in range(len(dic[key])):\n dic[key][i] = dic[key][i].detach().cpu()\n elif isinstance(dic[key][0], list):\n for i in range(len(dic[key])):\n for j in range(len(dic[key][i])):\n if isinstance(dic[key][i][j], torch.Tensor):\n dic[key][i][j] = dic[key][i][j].detach().cpu()"
},
{
"identifier": "to_gpu",
"path": "m2t2/train_utils.py",
"snippet": "def to_gpu(dic):\n for key in dic:\n if isinstance(dic[key], torch.Tensor):\n dic[key] = dic[key].cuda()\n elif isinstance(dic[key], list):\n if isinstance(dic[key][0], torch.Tensor):\n for i in range(len(dic[key])):\n dic[key][i] = dic[key][i].cuda()\n elif isinstance(dic[key][0], list):\n for i in range(len(dic[key])):\n for j in range(len(dic[key][i])):\n if isinstance(dic[key][i][j], torch.Tensor):\n dic[key][i][j] = dic[key][i][j].detach().cuda()"
}
] | import hydra
import pickle
import torch
from m2t2.dataset import collate
from m2t2.dataset_utils import normalize_rgb, sample_points
from m2t2.meshcat_utils import (
create_visualizer, visualize_grasp, visualize_pointcloud
)
from m2t2.m2t2 import M2T2
from m2t2.rlbench_utils import (
load_image, within_bound, gripper_pose_from_rlbench
)
from m2t2.train_utils import to_cpu, to_gpu | 4262 | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
'''
Demo script showing prediction for language-conditioned tasks.
'''
def load_data(episode_dir, cfg):
with open(f"{episode_dir}/meta_data.pkl", 'rb') as f:
meta_data = pickle.load(f)
data = {}
for camera in cfg.rlbench.cameras:
rgb, xyz, mask = load_image(
episode_dir, camera, meta_data, cfg.rlbench.frame_id
)
data[f"{camera}_rgb"] = rgb
data[f"{camera}_point_cloud"] = xyz
data[f"{camera}_mask"] = mask
pcd_raw, rgb_raw, seg_raw = within_bound(
data, cfg.rlbench.cameras, cfg.rlbench.scene_bounds
)
rgb = normalize_rgb(rgb_raw[:, None]).squeeze(2).T
pcd = torch.from_numpy(pcd_raw).float()
pt_idx = sample_points(pcd_raw, cfg.data.num_points)
pcd, rgb = pcd[pt_idx], rgb[pt_idx]
with open(cfg.rlbench.lang_emb_path, 'rb') as f:
lang_emb = pickle.load(f)
model_inputs = {
'inputs': torch.cat([pcd - pcd.mean(dim=0), rgb], dim=1),
'points': pcd,
'lang_tokens': torch.from_numpy(
lang_emb[meta_data['goal_description']]
).float()
}
obj_label = meta_data['object_label'][cfg.rlbench.frame_id]
if obj_label == 0:
model_inputs.update({
'object_inputs': torch.rand(1024, 6),
'ee_pose': torch.eye(4),
'bottom_center': torch.zeros(3),
'object_center': torch.zeros(3),
'task': 'pick'
})
else:
obj_xyz = torch.from_numpy(pcd_raw[seg_raw == obj_label]).float()
obj_rgb = torch.from_numpy(rgb_raw[seg_raw == obj_label]).float()
obj_xyz_grid = torch.unique(
(obj_xyz[:, :2] / cfg.data.grid_resolution).round(), dim=0
) * cfg.data.grid_resolution
bottom_center = obj_xyz.min(dim=0)[0]
bottom_center[:2] = obj_xyz_grid.mean(dim=0)
ee_pose = torch.from_numpy(gripper_pose_from_rlbench(
meta_data['gripper_matrix'][cfg.rlbench.frame_id]
)).float()
inv_ee_pose = ee_pose.inverse()
obj_xyz = obj_xyz @ inv_ee_pose[:3, :3].T + inv_ee_pose[:3, 3]
model_inputs.update({
'object_inputs': torch.cat([
obj_xyz - obj_xyz.mean(dim=0), obj_rgb
], dim=1),
'ee_pose': ee_pose,
'bottom_center': bottom_center,
'object_center': obj_xyz.mean(dim=0),
'task': 'place'
})
raw_data = meta_data
raw_data.update({
'pcd': pcd_raw, 'rgb': rgb_raw,
'seg': seg_raw, 'object_label': obj_label
})
return model_inputs, raw_data
@hydra.main(config_path='.', config_name='rlbench', version_base='1.3')
def main(cfg):
episode_dir = f"{cfg.rlbench.base_dir}/{cfg.rlbench.task_name}/episode{cfg.rlbench.episode}"
data, raw = load_data(episode_dir, cfg)
data_batch = collate([data])
to_gpu(data_batch)
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
'''
Demo script showing prediction for language-conditioned tasks.
'''
def load_data(episode_dir, cfg):
with open(f"{episode_dir}/meta_data.pkl", 'rb') as f:
meta_data = pickle.load(f)
data = {}
for camera in cfg.rlbench.cameras:
rgb, xyz, mask = load_image(
episode_dir, camera, meta_data, cfg.rlbench.frame_id
)
data[f"{camera}_rgb"] = rgb
data[f"{camera}_point_cloud"] = xyz
data[f"{camera}_mask"] = mask
pcd_raw, rgb_raw, seg_raw = within_bound(
data, cfg.rlbench.cameras, cfg.rlbench.scene_bounds
)
rgb = normalize_rgb(rgb_raw[:, None]).squeeze(2).T
pcd = torch.from_numpy(pcd_raw).float()
pt_idx = sample_points(pcd_raw, cfg.data.num_points)
pcd, rgb = pcd[pt_idx], rgb[pt_idx]
with open(cfg.rlbench.lang_emb_path, 'rb') as f:
lang_emb = pickle.load(f)
model_inputs = {
'inputs': torch.cat([pcd - pcd.mean(dim=0), rgb], dim=1),
'points': pcd,
'lang_tokens': torch.from_numpy(
lang_emb[meta_data['goal_description']]
).float()
}
obj_label = meta_data['object_label'][cfg.rlbench.frame_id]
if obj_label == 0:
model_inputs.update({
'object_inputs': torch.rand(1024, 6),
'ee_pose': torch.eye(4),
'bottom_center': torch.zeros(3),
'object_center': torch.zeros(3),
'task': 'pick'
})
else:
obj_xyz = torch.from_numpy(pcd_raw[seg_raw == obj_label]).float()
obj_rgb = torch.from_numpy(rgb_raw[seg_raw == obj_label]).float()
obj_xyz_grid = torch.unique(
(obj_xyz[:, :2] / cfg.data.grid_resolution).round(), dim=0
) * cfg.data.grid_resolution
bottom_center = obj_xyz.min(dim=0)[0]
bottom_center[:2] = obj_xyz_grid.mean(dim=0)
ee_pose = torch.from_numpy(gripper_pose_from_rlbench(
meta_data['gripper_matrix'][cfg.rlbench.frame_id]
)).float()
inv_ee_pose = ee_pose.inverse()
obj_xyz = obj_xyz @ inv_ee_pose[:3, :3].T + inv_ee_pose[:3, 3]
model_inputs.update({
'object_inputs': torch.cat([
obj_xyz - obj_xyz.mean(dim=0), obj_rgb
], dim=1),
'ee_pose': ee_pose,
'bottom_center': bottom_center,
'object_center': obj_xyz.mean(dim=0),
'task': 'place'
})
raw_data = meta_data
raw_data.update({
'pcd': pcd_raw, 'rgb': rgb_raw,
'seg': seg_raw, 'object_label': obj_label
})
return model_inputs, raw_data
@hydra.main(config_path='.', config_name='rlbench', version_base='1.3')
def main(cfg):
episode_dir = f"{cfg.rlbench.base_dir}/{cfg.rlbench.task_name}/episode{cfg.rlbench.episode}"
data, raw = load_data(episode_dir, cfg)
data_batch = collate([data])
to_gpu(data_batch)
| model = M2T2.from_config(cfg.m2t2) | 5 | 2023-11-03 22:32:05+00:00 | 8k |
Codra-Ingenierie-Informatique/DataLab | cdl/core/gui/panel/macro.py | [
{
"identifier": "Conf",
"path": "cdl/config.py",
"snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\nTEST_SEGFAULT_ERROR = len(os.environ.get(\"TEST_SEGFAULT_ERROR\", \"\")) > 0\nDATETIME_FORMAT = \"%d/%m/%Y - %H:%M:%S\"\nDATAPATH = configtools.get_module_data_path(MOD_NAME, \"data\")\nSHOTPATH = osp.join(\n configtools.get_module_data_path(MOD_NAME), os.pardir, \"doc\", \"images\", \"shots\"\n)\nOTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, \"plugins\")]\nIS_FROZEN = is_frozen(MOD_NAME)\nPLOTPY_DEFAULTS = {\n \"plot\": {\n # \"antialiasing\": False,\n # \"title/font/size\": 12,\n # \"title/font/bold\": False,\n # \"marker/curve/text/font/size\": 8,\n # \"marker/curve/text/font/family\": \"default\",\n # \"marker/curve/text/font/bold\": False,\n # \"marker/curve/text/font/italic\": False,\n \"marker/curve/text/textcolor\": \"black\",\n # \"marker/curve/text/background_color\": \"#ffffff\",\n # \"marker/curve/text/background_alpha\": 0.8,\n # \"marker/cross/text/font/family\": \"default\",\n # \"marker/cross/text/font/size\": 8,\n # \"marker/cross/text/font/bold\": False,\n # \"marker/cross/text/font/italic\": False,\n \"marker/cross/text/textcolor\": \"black\",\n # \"marker/cross/text/background_color\": \"#ffffff\",\n \"marker/cross/text/background_alpha\": 0.7,\n # \"marker/cross/line/style\": \"DashLine\",\n # \"marker/cross/line/color\": \"yellow\",\n # \"marker/cross/line/width\": 1,\n # \"marker/cursor/text/font/size\": 8,\n # \"marker/cursor/text/font/family\": \"default\",\n # \"marker/cursor/text/font/bold\": False,\n # \"marker/cursor/text/font/italic\": False,\n # \"marker/cursor/text/textcolor\": \"#ff9393\",\n # \"marker/cursor/text/background_color\": \"#ffffff\",\n # \"marker/cursor/text/background_alpha\": 0.8,\n \"shape/drag/symbol/marker\": \"NoSymbol\",\n \"shape/mask/symbol/size\": 5,\n \"shape/mask/sel_symbol/size\": 8,\n # -----------------------------------------------------------------------------\n # Annotated shape style for annotations:\n \"shape/annotation/line/style\": \"SolidLine\",\n \"shape/annotation/line/color\": \"#ffff00\",\n \"shape/annotation/line/width\": 1,\n \"shape/annotation/fill/style\": \"SolidPattern\",\n \"shape/annotation/fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/fill/alpha\": 0.1,\n \"shape/annotation/symbol/marker\": \"Rect\",\n \"shape/annotation/symbol/size\": 3,\n \"shape/annotation/symbol/edgecolor\": \"#ffff00\",\n \"shape/annotation/symbol/facecolor\": \"#ffff00\",\n \"shape/annotation/symbol/alpha\": 1.0,\n \"shape/annotation/sel_line/style\": \"SolidLine\",\n \"shape/annotation/sel_line/color\": \"#00ff00\",\n \"shape/annotation/sel_line/width\": 1,\n \"shape/annotation/sel_fill/style\": \"SolidPattern\",\n \"shape/annotation/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/sel_fill/alpha\": 0.1,\n \"shape/annotation/sel_symbol/marker\": \"Rect\",\n \"shape/annotation/sel_symbol/size\": 9,\n \"shape/annotation/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/annotation/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/annotation/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / signals:\n \"shape/result/s/line/style\": \"SolidLine\",\n \"shape/result/s/line/color\": MAIN_FG_COLOR,\n \"shape/result/s/line/width\": 1,\n 
\"shape/result/s/fill/style\": \"SolidPattern\",\n \"shape/result/s/fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/fill/alpha\": 0.1,\n \"shape/result/s/symbol/marker\": \"XCross\",\n \"shape/result/s/symbol/size\": 7,\n \"shape/result/s/symbol/edgecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/facecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/alpha\": 1.0,\n \"shape/result/s/sel_line/style\": \"SolidLine\",\n \"shape/result/s/sel_line/color\": \"#00ff00\",\n \"shape/result/s/sel_line/width\": 1,\n \"shape/result/s/sel_fill/style\": \"SolidPattern\",\n \"shape/result/s/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/sel_fill/alpha\": 0.1,\n \"shape/result/s/sel_symbol/marker\": \"Rect\",\n \"shape/result/s/sel_symbol/size\": 9,\n \"shape/result/s/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/s/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/s/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / images:\n \"shape/result/i/line/style\": \"SolidLine\",\n \"shape/result/i/line/color\": \"#ffff00\",\n \"shape/result/i/line/width\": 1,\n \"shape/result/i/fill/style\": \"SolidPattern\",\n \"shape/result/i/fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/fill/alpha\": 0.1,\n \"shape/result/i/symbol/marker\": \"Rect\",\n \"shape/result/i/symbol/size\": 3,\n \"shape/result/i/symbol/edgecolor\": \"#ffff00\",\n \"shape/result/i/symbol/facecolor\": \"#ffff00\",\n \"shape/result/i/symbol/alpha\": 1.0,\n \"shape/result/i/sel_line/style\": \"SolidLine\",\n \"shape/result/i/sel_line/color\": \"#00ff00\",\n \"shape/result/i/sel_line/width\": 1,\n \"shape/result/i/sel_fill/style\": \"SolidPattern\",\n \"shape/result/i/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/sel_fill/alpha\": 0.1,\n \"shape/result/i/sel_symbol/marker\": \"Rect\",\n \"shape/result/i/sel_symbol/size\": 9,\n \"shape/result/i/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/i/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/i/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n },\n}\ndef is_frozen(module_name: str) -> bool:\ndef get_mod_source_dir() -> str | None:\n def get_def_dict(cls, category: str) -> dict:\n def set_def_dict(cls, category: str, def_dict: dict) -> None:\ndef get_old_log_fname(fname):\ndef initialize():\ndef reset():\nclass MainSection(conf.Section, metaclass=conf.SectionMeta):\nclass ConsoleSection(conf.Section, metaclass=conf.SectionMeta):\nclass IOSection(conf.Section, metaclass=conf.SectionMeta):\nclass ProcSection(conf.Section, metaclass=conf.SectionMeta):\nclass ViewSection(conf.Section, metaclass=conf.SectionMeta):\nclass Conf(conf.Configuration, metaclass=conf.ConfMeta):"
},
{
"identifier": "Macro",
"path": "cdl/core/gui/macroeditor.py",
"snippet": "class Macro(QC.QObject, ObjItf, metaclass=MacroMeta):\n \"\"\"Object representing a macro: editor, path, open/save actions, etc.\n\n Args:\n console (PythonShellWidget): Python shell widget\n name (str | None): Macro name. Defaults to None.\n \"\"\"\n\n PREFIX = \"m\"\n\n STARTED = QC.Signal()\n FINISHED = QC.Signal()\n MODIFIED = QC.Signal()\n FILE_HEADER = os.linesep.join(\n [\"# -*- coding: utf-8 -*-\", \"\", '\"\"\"DataLab Macro\"\"\"', \"\", \"\"]\n )\n MACRO_TITLE = _(\"Macro simple example\")\n MACRO_SAMPLE = f\"\"\"# {MACRO_TITLE}\n\nimport numpy as np\n\nfrom cdl.proxy import RemoteProxy\n\nproxy = RemoteProxy()\n\nz = np.random.rand(20, 20)\nproxy.add_image(\"toto\", z)\nproxy.compute_fft()\n\nprint(\"All done!\")\n\"\"\"\n\n def __init__(self, console: PythonShellWidget, title: str | None = None) -> None:\n super().__init__()\n self.console = console\n self.title = self.get_untitled_title() if title is None else title\n self.editor = CodeEditor(language=\"python\")\n self.set_code(self.MACRO_SAMPLE)\n self.editor.modificationChanged.connect(self.modification_changed)\n self.process = None\n\n def regenerate_uuid(self):\n \"\"\"Regenerate UUID\n\n This method is used to regenerate UUID after loading the object from a file.\n This is required to avoid UUID conflicts when loading objects from file\n without clearing the workspace first.\n \"\"\"\n # No UUID to regenerate for macro\n\n @property\n def title(self) -> str:\n \"\"\"Return object title\"\"\"\n return self.objectName()\n\n @title.setter\n def title(self, title: str) -> None:\n \"\"\"Set object title\"\"\"\n self.setObjectName(title)\n\n def get_code(self) -> str:\n \"\"\"Return code to be executed\"\"\"\n text = self.editor.toPlainText()\n return os.linesep.join(text.splitlines(False))\n\n def set_code(self, code: str) -> None:\n \"\"\"Set code to be executed\n\n Args:\n code (str): Code to be executed\n \"\"\"\n self.editor.setPlainText(code)\n\n def serialize(self, writer: BaseIOHandler) -> None:\n \"\"\"Serialize this macro\n\n Args:\n writer (BaseIOHandler): Writer\n \"\"\"\n with writer.group(\"title\"):\n writer.write(self.title)\n with writer.group(\"contents\"):\n writer.write(self.get_code())\n\n def deserialize(self, reader: BaseIOHandler) -> None:\n \"\"\"Deserialize this macro\n\n Args:\n reader (BaseIOHandler): Reader\n \"\"\"\n with reader.group(\"title\"):\n self.title = reader.read_any()\n with reader.group(\"contents\"):\n self.set_code(reader.read_any())\n\n def to_file(self, filename: str) -> None:\n \"\"\"Save macro to file\n\n Args:\n filename (str): File name\n \"\"\"\n code = self.FILE_HEADER + self.get_code()\n with open(filename, \"wb\") as fdesc:\n fdesc.write(code.encode(\"utf-8\"))\n\n def from_file(self, filename: str) -> None:\n \"\"\"Load macro from file\n\n Args:\n filename (str): File name\n \"\"\"\n with open(filename, \"rb\") as fdesc:\n code = to_string(fdesc.read()).strip()\n header = self.FILE_HEADER.strip()\n if code.startswith(header):\n code = code[len(header) :].strip()\n self.set_code(code)\n self.title = osp.basename(filename)\n\n @staticmethod\n def get_untitled_title() -> str:\n \"\"\"Increment untitled number and return untitled macro title\n\n Returns:\n str: Untitled macro title\n \"\"\"\n global UNTITLED_NB # pylint: disable=global-statement\n UNTITLED_NB += 1\n untitled = _(\"Untitled\")\n return f\"{untitled} {UNTITLED_NB:02d}\"\n\n def modification_changed(self, state: bool) -> None:\n \"\"\"Method called when macro's editor modification state changed\n\n 
Args:\n state (bool): Modification state\n \"\"\"\n if state:\n self.MODIFIED.emit()\n\n @staticmethod\n def transcode(bytearr: QC.QByteArray) -> str:\n \"\"\"Transcode bytes to locale str\n\n Args:\n bytearr (QByteArray): Byte array\n\n Returns:\n str: Locale str\n \"\"\"\n locale_codec = QC.QTextCodec.codecForLocale()\n return locale_codec.toUnicode(bytearr.data())\n\n def get_stdout(self) -> str:\n \"\"\"Return standard output str\n\n Returns:\n str: Standard output str\n \"\"\"\n self.process.setReadChannel(QC.QProcess.StandardOutput)\n bytearr = QC.QByteArray()\n while self.process.bytesAvailable():\n bytearr += self.process.readAllStandardOutput()\n return self.transcode(bytearr)\n\n def get_stderr(self) -> str:\n \"\"\"Return standard error str\n\n Returns:\n str: Standard error str\n \"\"\"\n self.process.setReadChannel(QC.QProcess.StandardError)\n bytearr = QC.QByteArray()\n while self.process.bytesAvailable():\n bytearr += self.process.readAllStandardError()\n return self.transcode(bytearr)\n\n def write_output(self) -> None:\n \"\"\"Write text as standard output\"\"\"\n self.console.write(self.get_stdout())\n\n def write_error(self) -> None:\n \"\"\"Write text as standard error\"\"\"\n self.console.write_error(self.get_stderr())\n\n def print(self, text, error=False, eol_before=True) -> None:\n \"\"\"Print text in console, with line separator\n\n Args:\n text (str): Text to be printed\n error (bool | None): Print as error. Defaults to False.\n \"\"\"\n msg = f\"---({time.ctime()})---[{text}]{os.linesep}\"\n if eol_before:\n msg = os.linesep + msg\n self.console.write(msg, error=error, prompt=not error)\n\n def run(self) -> None:\n \"\"\"Run macro\"\"\"\n self.process = QC.QProcess()\n code = self.get_code().replace('\"', \"'\")\n cdl_path = osp.abspath(osp.join(osp.dirname(cdl.__file__), os.pardir))\n code = f\"import sys; sys.path.append(r'{cdl_path}'){os.linesep}{code}\"\n env = QC.QProcessEnvironment()\n env.insert(execenv.XMLRPCPORT_ENV, str(execenv.xmlrpcport))\n sysenv = env.systemEnvironment()\n for key in sysenv.keys():\n env.insert(key, sysenv.value(key))\n self.process.readyReadStandardOutput.connect(self.write_output)\n self.process.readyReadStandardError.connect(self.write_error)\n self.process.finished.connect(self.finished)\n self.process.setProcessEnvironment(env)\n args = [\"-c\", code]\n self.process.start(sys.executable, args)\n running = self.process.waitForStarted(3000)\n if not running:\n self.print(_(\"# ==> Unable to run '%s' macro\") % self.title, error=True)\n QW.QMessageBox.critical(\n self, _(\"Error\"), _(\"Macro Python interpreter failed to start!\")\n )\n else:\n self.print(_(\"# ==> Running '%s' macro...\") % self.title)\n self.STARTED.emit()\n\n def is_running(self) -> bool:\n \"\"\"Is macro running?\n\n Returns:\n bool: True if macro is running\n \"\"\"\n if self.process is not None:\n return self.process.state() == QC.QProcess.Running\n return False\n\n def kill(self) -> None:\n \"\"\"Kill process associated to macro\"\"\"\n if self.process is not None:\n self.print(_(\"Terminating '%s' macro\") % self.title, error=True)\n self.process.kill()\n\n # pylint: disable=unused-argument\n def finished(self, exit_code, exit_status) -> None:\n \"\"\"Process has finished\n\n Args:\n exit_code (int): Exit code\n exit_status (QC.QProcess.ExitStatus): Exit status\n \"\"\"\n self.print(_(\"# <== '%s' macro has finished\") % self.title, eol_before=False)\n self.FINISHED.emit()\n self.process = None"
},
{
"identifier": "AbstractPanel",
"path": "cdl/core/gui/panel/base.py",
"snippet": "class AbstractPanel(QW.QSplitter, metaclass=AbstractPanelMeta):\n \"\"\"Object defining DataLab panel interface,\n based on a vertical QSplitter widget\n\n A panel handle an object list (objects are signals, images, macros, ...).\n Each object must implement ``cdl.core.gui.ObjItf`` interface\n \"\"\"\n\n H5_PREFIX = \"\"\n SIG_OBJECT_ADDED = QC.Signal()\n SIG_OBJECT_REMOVED = QC.Signal()\n\n @abc.abstractmethod\n def __init__(self, parent):\n super().__init__(QC.Qt.Vertical, parent)\n self.setObjectName(self.__class__.__name__[0].lower())\n # Check if the class implements __len__, __getitem__ and __iter__\n for method in (\"__len__\", \"__getitem__\", \"__iter__\"):\n if not hasattr(self, method):\n raise NotImplementedError(\n f\"Class {self.__class__.__name__} must implement method {method}\"\n )\n\n # pylint: disable=unused-argument\n def get_serializable_name(self, obj: ObjItf) -> str:\n \"\"\"Return serializable name of object\"\"\"\n title = re.sub(\"[^-a-zA-Z0-9_.() ]+\", \"\", obj.title.replace(\"/\", \"_\"))\n name = f\"{obj.short_id}: {title}\"\n return name\n\n def serialize_object_to_hdf5(self, obj: ObjItf, writer: NativeH5Writer) -> None:\n \"\"\"Serialize object to HDF5 file\"\"\"\n with writer.group(self.get_serializable_name(obj)):\n obj.serialize(writer)\n\n def deserialize_object_from_hdf5(self, reader: NativeH5Reader, name: str) -> ObjItf:\n \"\"\"Deserialize object from a HDF5 file\"\"\"\n with reader.group(name):\n obj = self.create_object()\n obj.deserialize(reader)\n obj.regenerate_uuid()\n return obj\n\n @abc.abstractmethod\n def serialize_to_hdf5(self, writer: NativeH5Writer) -> None:\n \"\"\"Serialize whole panel to a HDF5 file\"\"\"\n\n @abc.abstractmethod\n def deserialize_from_hdf5(self, reader: NativeH5Reader) -> None:\n \"\"\"Deserialize whole panel from a HDF5 file\"\"\"\n\n @abc.abstractmethod\n def create_object(self) -> ObjItf:\n \"\"\"Create and return object\"\"\"\n\n @abc.abstractmethod\n def add_object(self, obj: ObjItf) -> None:\n \"\"\"Add object to panel\"\"\"\n\n @abc.abstractmethod\n def remove_all_objects(self):\n \"\"\"Remove all objects\"\"\"\n self.SIG_OBJECT_REMOVED.emit()"
},
{
"identifier": "execenv",
"path": "cdl/env.py",
"snippet": "DEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\n QUIET = \"quiet\"\n NORMAL = \"normal\"\n DEBUG = \"debug\"\n UNATTENDED_ARG = \"unattended\"\n VERBOSE_ARG = \"verbose\"\n SCREENSHOT_ARG = \"screenshot\"\n DELAY_ARG = \"delay\"\n XMLRPCPORT_ARG = \"xmlrpcport\"\n DONOTQUIT_ENV = \"CDL_DO_NOT_QUIT\"\n UNATTENDED_ENV = GuiDataExecEnv.UNATTENDED_ENV\n VERBOSE_ENV = GuiDataExecEnv.VERBOSE_ENV\n SCREENSHOT_ENV = GuiDataExecEnv.SCREENSHOT_ENV\n DELAY_ENV = GuiDataExecEnv.DELAY_ENV\n XMLRPCPORT_ENV = \"CDL_XMLRPCPORT\"\n CATCHER_TEST_ENV = \"CDL_CATCHER_TEST\"\nclass VerbosityLevels(enum.Enum):\nclass CDLExecEnv:\n def __init__(self):\n def to_dict(self):\n def __str__(self):\n def enable_demo_mode(self, delay: int):\n def __get_mode(env):\n def __set_mode(env, value):\n def do_not_quit(self):\n def do_not_quit(self, value):\n def unattended(self):\n def unattended(self, value):\n def catcher_test(self):\n def catcher_test(self, value):\n def screenshot(self):\n def screenshot(self, value):\n def verbose(self):\n def verbose(self, value):\n def delay(self):\n def delay(self, value: int):\n def xmlrpcport(self):\n def xmlrpcport(self, value: int):\n def parse_args(self):\n def set_env_from_args(self, args):\n def log(self, source: Any, *objects: Any) -> None:\n def print(self, *objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\n def pprint(\n self,\n obj,\n stream=None,\n indent=1,\n width=80,\n depth=None,\n compact=False,\n sort_dicts=True,\n ):"
},
{
"identifier": "create_menu_button",
"path": "cdl/utils/qthelpers.py",
"snippet": "def create_menu_button(\n parent: QW.QWidget | None = None, menu: QW.QMenu | None = None\n) -> QW.QPushButton:\n \"\"\"Create a menu button\n\n Args:\n parent (QWidget): Parent widget\n menu (QMenu): Menu to attach to the button\n\n Returns:\n QW.QPushButton: Menu button\n \"\"\"\n button = QW.QPushButton(get_icon(\"libre-gui-menu.svg\"), \"\", parent)\n button.setFlat(True)\n if menu is not None:\n button.setMenu(menu)\n return button"
},
{
"identifier": "qt_try_loadsave_file",
"path": "cdl/utils/qthelpers.py",
"snippet": "@contextmanager\ndef qt_try_loadsave_file(\n parent: QW.QWidget, filename: str, operation: str\n) -> Generator[str, None, None]:\n \"\"\"Try and open file (operation: \"load\" or \"save\")\"\"\"\n if operation == \"load\":\n text = _(\"%s could not be opened:\")\n elif operation == \"save\":\n text = _(\"%s could not be written:\")\n else:\n raise ValueError(\"operation argument must be 'load' or 'save'\")\n try:\n yield filename\n except Exception as msg: # pylint: disable=broad-except\n traceback.print_exc()\n message = (text % osp.basename(filename)) + \"\\n\" + str(msg)\n QW.QMessageBox.critical(parent, APP_NAME, message)\n finally:\n pass"
},
{
"identifier": "save_restore_stds",
"path": "cdl/utils/qthelpers.py",
"snippet": "@contextmanager\ndef save_restore_stds() -> Generator[None, None, None]:\n \"\"\"Save/restore standard I/O before/after doing some things\n (e.g. calling Qt open/save dialogs)\"\"\"\n saved_in, saved_out, saved_err = sys.stdin, sys.stdout, sys.stderr\n sys.stdout = None\n try:\n yield\n finally:\n sys.stdin, sys.stdout, sys.stderr = saved_in, saved_out, saved_err"
}
] | import re
from typing import TYPE_CHECKING
from guidata.config import CONF
from guidata.configtools import get_font, get_icon
from guidata.qthelpers import add_actions, create_action, is_dark_mode
from guidata.widgets.console.shell import PythonShellWidget
from guidata.widgets.dockable import DockableWidgetMixin
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy.compat import getopenfilename, getsavefilename
from cdl.config import Conf, _
from cdl.core.gui.macroeditor import Macro
from cdl.core.gui.panel.base import AbstractPanel
from cdl.env import execenv
from cdl.utils.qthelpers import (
create_menu_button,
qt_try_loadsave_file,
save_restore_stds,
)
from cdl.core.io.native import NativeH5Reader, NativeH5Writer | 6,457 | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)
"""DataLab Macro Panel"""
# pylint: disable=invalid-name # Allows short reference names like x, y, ...
from __future__ import annotations
if TYPE_CHECKING: # pragma: no cover
class MacroTabs(QW.QTabWidget):
"""Macro tabwidget
Args:
parent (QWidget): Parent widget
"""
SIG_CONTEXT_MENU = QC.Signal(QC.QPoint)
def __init__(self, parent=None) -> None:
super().__init__(parent)
self.setTabsClosable(True)
self.setMovable(True)
def contextMenuEvent(self, event): # pylint: disable=C0103
"""Override Qt method"""
self.SIG_CONTEXT_MENU.emit(event.globalPos())
class MacroPanel(AbstractPanel, DockableWidgetMixin):
"""Macro manager widget
Args:
parent (QWidget): Parent widget
"""
LOCATION = QC.Qt.LeftDockWidgetArea
PANEL_STR = _("Macro panel")
H5_PREFIX = "DataLab_Mac"
SIG_OBJECT_MODIFIED = QC.Signal()
FILE_FILTERS = f"{_('Python files')} (*.py)"
def __init__(self, parent: QW.QWidget | None = None) -> None:
super().__init__(parent)
self.setWindowTitle(_("Macro manager"))
self.setWindowIcon(get_icon("libre-gui-cogs.svg"))
self.setOrientation(QC.Qt.Vertical)
self.context_menu = QW.QMenu()
self.tabwidget_tb = QW.QToolBar(self)
self.tabwidget_tb.setOrientation(QC.Qt.Vertical)
self.console = PythonShellWidget(self, read_only=True)
self.console.set_light_background(not is_dark_mode())
self.console.setMaximumBlockCount(5000)
font = get_font(CONF, "console")
font.setPointSize(10)
self.console.set_font(font)
self.console.write(_("-***- Macro Console -***-"), prompt=True)
self.tabwidget = MacroTabs(self)
self.tabwidget.tabBarDoubleClicked.connect(self.rename_macro)
self.tabwidget.tabCloseRequested.connect(self.remove_macro)
self.tabwidget.currentChanged.connect(self.__update_actions)
tabwidget_with_tb = QW.QWidget(self)
tabwidget_with_tb.setLayout(QW.QHBoxLayout())
tabwidget_with_tb.layout().addWidget(self.tabwidget_tb)
tabwidget_with_tb.layout().addWidget(self.tabwidget)
# Put console in a groupbox to have a title
console_groupbox = QW.QGroupBox(_("Console"), self)
console_groupbox.setLayout(QW.QHBoxLayout())
console_groupbox.layout().addWidget(self.console)
# Put console groupbox in a frame to have a nice margin
console_frame = QW.QFrame(self)
console_frame.setLayout(QW.QHBoxLayout())
console_frame.layout().addWidget(console_groupbox)
for widget in (tabwidget_with_tb, console_frame):
self.addWidget(widget)
# Ensure that the tabwidget and the console have the same height
self.setStretchFactor(0, 1)
self.setStretchFactor(1, 0)
self.run_action = None
self.stop_action = None
self.obj_actions: list[QW.QAction] = [] # Object-dependent actions
| # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)
"""DataLab Macro Panel"""
# pylint: disable=invalid-name # Allows short reference names like x, y, ...
from __future__ import annotations
if TYPE_CHECKING: # pragma: no cover
class MacroTabs(QW.QTabWidget):
"""Macro tabwidget
Args:
parent (QWidget): Parent widget
"""
SIG_CONTEXT_MENU = QC.Signal(QC.QPoint)
def __init__(self, parent=None) -> None:
super().__init__(parent)
self.setTabsClosable(True)
self.setMovable(True)
def contextMenuEvent(self, event): # pylint: disable=C0103
"""Override Qt method"""
self.SIG_CONTEXT_MENU.emit(event.globalPos())
class MacroPanel(AbstractPanel, DockableWidgetMixin):
"""Macro manager widget
Args:
parent (QWidget): Parent widget
"""
LOCATION = QC.Qt.LeftDockWidgetArea
PANEL_STR = _("Macro panel")
H5_PREFIX = "DataLab_Mac"
SIG_OBJECT_MODIFIED = QC.Signal()
FILE_FILTERS = f"{_('Python files')} (*.py)"
def __init__(self, parent: QW.QWidget | None = None) -> None:
super().__init__(parent)
self.setWindowTitle(_("Macro manager"))
self.setWindowIcon(get_icon("libre-gui-cogs.svg"))
self.setOrientation(QC.Qt.Vertical)
self.context_menu = QW.QMenu()
self.tabwidget_tb = QW.QToolBar(self)
self.tabwidget_tb.setOrientation(QC.Qt.Vertical)
self.console = PythonShellWidget(self, read_only=True)
self.console.set_light_background(not is_dark_mode())
self.console.setMaximumBlockCount(5000)
font = get_font(CONF, "console")
font.setPointSize(10)
self.console.set_font(font)
self.console.write(_("-***- Macro Console -***-"), prompt=True)
self.tabwidget = MacroTabs(self)
self.tabwidget.tabBarDoubleClicked.connect(self.rename_macro)
self.tabwidget.tabCloseRequested.connect(self.remove_macro)
self.tabwidget.currentChanged.connect(self.__update_actions)
tabwidget_with_tb = QW.QWidget(self)
tabwidget_with_tb.setLayout(QW.QHBoxLayout())
tabwidget_with_tb.layout().addWidget(self.tabwidget_tb)
tabwidget_with_tb.layout().addWidget(self.tabwidget)
# Put console in a groupbox to have a title
console_groupbox = QW.QGroupBox(_("Console"), self)
console_groupbox.setLayout(QW.QHBoxLayout())
console_groupbox.layout().addWidget(self.console)
# Put console groupbox in a frame to have a nice margin
console_frame = QW.QFrame(self)
console_frame.setLayout(QW.QHBoxLayout())
console_frame.layout().addWidget(console_groupbox)
for widget in (tabwidget_with_tb, console_frame):
self.addWidget(widget)
# Ensure that the tabwidget and the console have the same height
self.setStretchFactor(0, 1)
self.setStretchFactor(1, 0)
self.run_action = None
self.stop_action = None
self.obj_actions: list[QW.QAction] = [] # Object-dependent actions | self.__macros: list[Macro] = [] | 1 | 2023-11-09 16:56:03+00:00 | 8k |
sxwyh/pytradecn | src/pytradecn/template/basetemplate.py | [
{
"identifier": "BaseClientMeta",
"path": "src/pytradecn/client/baseclient.py",
"snippet": "class BaseClientMeta(type):\n \"\"\"客户端元类\"\"\"\n\n clients = []\n\n def __init__(cls, name, bases, attrs):\n\n super(BaseClientMeta, cls).__init__(name, bases, attrs)\n\n if name != 'BaseClient':\n\n cls.app = Application(backend=\"uia\")\n\n if 'loginwindow' in attrs:\n # 主窗口设计成列表是为了兼容集成在行情软件中的交易客户端,例如通达信\n criterias_ = attrs['loginwindow']\n if isinstance(criterias_, dict):\n criterias_ = [criterias_, ]\n criterias = [crit.copy() for crit in criterias_]\n # 对第0个元素完成处理,第0个元素是软件的界面,此时默认top_level_only=True,意味着只查找桌面的子项\n criterias[0]['app'] = cls.app\n criterias[0]['backend'] = 'uia'\n # 不需要对1以后的元素添加top_level_only、backend、parent等属性,因为pywinauto内部可以自动处理\n # 最后一个元素是真正的窗口,必须有control_count属性\n control_count = criterias[-1].pop('control_count', 20)\n # 构造真正的WindowSpecification对象\n cls.loginwindow = WindowSpecification(criterias[0])\n cls.loginwindow.criteria.extend(criterias[1:])\n # 添加自定义child_count属性,以识别弹窗\n cls.loginwindow.child_count = control_count\n else:\n raise ClientConfigError(f'客户端{cls}缺少关键配置<loginwindow>')\n\n if 'mainwindow' in attrs:\n # 主窗口设计成列表是为了兼容集成在行情软件中的交易客户端,例如通达信\n criterias_ = attrs['mainwindow']\n if isinstance(criterias_, dict):\n criterias_ = [criterias_, ]\n criterias = [crit.copy() for crit in criterias_]\n # 对第0个元素完成处理,第0个元素是软件的界面,此时默认top_level_only=True,意味着只查找桌面的子项\n criterias[0]['app'] = cls.app\n criterias[0]['backend'] = 'uia'\n # 不需要对1以后的元素添加top_level_only、backend、parent等属性,因为pywinauto内部可以自动处理\n # 最后一个元素是真正的窗口,必须有control_count属性\n control_count = criterias[-1].pop('control_count', 4)\n # 构造真正的WindowSpecification对象\n cls.mainwindow = WindowSpecification(criterias[0])\n cls.mainwindow.criteria.extend(criterias[1:])\n # 添加自定义child_count属性,以识别弹窗\n cls.mainwindow.child_count = control_count\n else:\n raise ClientConfigError(f'客户端{cls}缺少关键配置<mainwindow>')\n\n # 最正确的做法是:cls.prompt = PromptManager(cls),但会造成import冲突,所以在其元类中实现单例\n cls.prompt = None\n\n BaseClientMeta.clients.append(cls)\n\n # def __getattribute__(cls, attr):\n # return object.__getattribute__(cls, attr)\n #\n # def __getattr__(cls, attr):\n # if attr in BaseClient.__dict__:\n # value = BaseClient.__dict__[attr]\n # if isinstance(value, classmethod):\n # # return lambda: rtn.__wrapped__(cls) # 只适用于无参数时\n # def wrapper(*args, **kwargs):\n # return value.__wrapped__(cls, *args, **kwargs)\n # return wrapper\n # else:\n # return value\n # else:\n # raise ClientConfigError(f'客户端{cls}缺少名为<{attr}>的配置')"
},
{
"identifier": "PromptManager",
"path": "src/pytradecn/prompt.py",
"snippet": "class PromptManager(metaclass=PromptManagerMeta):\n \"\"\"弹出框、提示框管理器\"\"\"\n\n def __init__(self, client):\n self.__client = client\n self.__monitorthread = None\n self.__monitorrun = False\n\n def __get_prompts(self):\n \"\"\"获得所有弹窗\"\"\"\n window = self.__client.window()\n child_count = window.child_count # 似乎以计数的方法更为可靠\n\n elements = window.element_info.children()[:-child_count] # 需要返回元素,而非包装器\n\n if elements:\n elements = [elem for elem in elements if elem.control_type in self.__client.PROMPT_TYPE_LIST]\n\n if elements:\n elements = [elem for elem in elements if len(elem.children(process=elem.process_id)) > 0]\n\n # 返回弹窗包装器\n return [PromptWrapper(ele) for ele in elements]\n\n def __find_prompts(self,\n title=None, # 对话框标题,支持正则表达式\n content=None, # 对话框内容\n text=None, # **对话框中的所有inspect.exe可见文字字符串,支持正则表达式,这是一个万能参数**\n best_match=None, # pywinauto的参数,可以用但作用不大\n func=None, # 定义一个函数去筛选\n ):\n \"\"\"依据给定的条件,筛选符合条件的对话框\"\"\"\n\n panes = self.__get_prompts()\n\n if title is not None and panes:\n panes = [pane for pane in panes if re.match(title, pane.title)]\n\n if content is not None and panes:\n panes = [pane for pane in panes if re.match(content, pane.content())]\n\n if text is not None and panes:\n panes = [pane for pane in panes if list(filter(lambda x: re.match(text, x), pane.texts()))]\n\n if best_match is not None and panes:\n panes = find_best_control_matches(best_match, panes)\n\n if func is not None and panes:\n panes = [pane for pane in panes if func(pane)]\n\n return panes\n\n def __monitor(self, duration, kwargs):\n \"\"\"监视弹窗并关闭\"\"\"\n start = time.perf_counter()\n\n while self.__monitorrun is True:\n for pane in self.__find_prompts(**kwargs):\n pane.close()\n\n time_left = duration - (time.perf_counter() - start)\n\n if time_left > 0:\n time.sleep(min(Timings.window_find_retry, time_left))\n else:\n self.__monitorrun = False\n\n def stop_monitor(self):\n if self.__monitorthread is not None and self.__monitorthread.is_alive():\n self.__monitorrun = False\n self.__monitorthread.join()\n\n def start_monitor(self, delay=0, **kwargs):\n self.stop_monitor()\n self.__monitorrun = True\n self.__monitorthread = Thread(target=self.__monitor, name='PromptClose', args=(delay, kwargs))\n self.__monitorthread.start()\n\n def close(self, **kwargs):\n \"\"\"关闭当前所有存在的弹窗\"\"\"\n self.stop_monitor()\n for pane in self.__find_prompts(**kwargs):\n pane.close()\n\n def tooltip(self, timeout=None, retry_interval=None, **kwargs):\n \"\"\"提示框应用场景,采用捕捉模式\"\"\"\n self.stop_monitor()\n # 不要设置为默认值\n if timeout is None:\n timeout = Timings.window_find_timeout * 0.6\n if retry_interval is None:\n retry_interval = Timings.window_find_retry\n\n try:\n return wait_until_passes(timeout,\n retry_interval,\n operator.getitem,\n (IndexError,),\n self.__find_prompts(**kwargs),\n 0 # 总是返回最上面的提示框\n )\n except TimeoutError:\n return None\n\n def exists(self, timeout=None, retry_interval=None, **kwargs):\n \"\"\"判断弹窗是否存在,默认非捕捉模式\"\"\"\n # 不要设置为默认值,时间会更改\n if timeout is None:\n # timeout = Timings.exists_timeout\n timeout = 0\n if retry_interval is None:\n retry_interval = Timings.exists_retry\n\n return self.tooltip(timeout, retry_interval, **kwargs) is not None"
},
{
"identifier": "BaseEngine",
"path": "src/pytradecn/engine/baseengine.py",
"snippet": "class BaseEngine(metaclass=BaseEngineMeta):\n \"\"\"登录引擎基类\"\"\"\n\n # 登录引擎的名称\n name = ''\n\n def __new__(cls, client):\n return object.__new__(BaseEngineMeta.engines[client.loginengine])\n\n def __init__(self, client):\n self.__element = client.loginwindow.element_info # 加快运行速度\n self._client = client\n self._prompt = PromptManager(client)\n\n def _get_control(self, control_define):\n # control_define 为Client格式的字符串或字典,或者pywinauto格式的字典\n return get_control_specification(parent=self.__element, control_define=control_define)\n\n @abstractmethod\n def login(self):\n raise NotImplementedError()"
},
{
"identifier": "BaseModel",
"path": "src/pytradecn/model/basemodel.py",
"snippet": "class BaseModel(metaclass=BaseModelMeta):\n \"\"\"交易模型基类\"\"\"\n\n # 交易模型的名称\n name = ''\n\n def __new__(cls, client):\n return object.__new__(BaseModelMeta.models[client.trademodel])\n\n def __init__(self, client):\n self.__element = client.mainwindow.element_info # 加快运行速度\n self._client = client\n self._prompt = PromptManager(client)\n\n def _get_control(self, control_define):\n # control_define 为Client格式的字符串或字典,或者pywinauto格式的字典\n return get_control_specification(parent=self.__element, control_define=control_define)\n\n @abstractmethod\n def initialization(self):\n \"\"\"初始化交易窗口\"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def reset(self):\n \"\"\"复位交易窗口的功能\"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def buy(self, code='', price=None, count=None, **kwargs):\n \"\"\"执行买入操作\"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def sell(self, code='', price=None, count=None, **kwargs):\n \"\"\"执行卖出操作\"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def cancel(self, **kwargs):\n \"\"\"执行撤单操作\"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def query(self, target, **kwargs):\n \"\"\"执行查询操作\"\"\"\n raise NotImplementedError()"
},
{
"identifier": "logger",
"path": "src/pytradecn/logger.py",
"snippet": "class Logger(object):\n def __init__(self):\n def __addstreamhandler(self):\n def addfilehandler(self, path):\n def __set_level(self, level):\n def reset(self):\n def disable(self):\n def enable(self):\n def debug(self, msg):\n def info(self, msg):\n def warning(self, msg):\n def warn(self, msg):\n def error(self, msg):\n def exception(self, msg):\n def critical(self, msg):"
},
{
"identifier": "ClientConfigError",
"path": "src/pytradecn/error.py",
"snippet": "class ElementAmbiguousError(Exception):\nclass ElementNotFoundError(Exception):\nclass ItemKeyError(Exception):\nclass ClientConfigError(Exception):\nclass TradeFailFError(Exception):\nclass StockCountError(Exception):\nclass StockPriceError(Exception):\nclass StockCodeError(Exception):\nclass ScreenLockedError(Exception):\nclass LoginError(Exception):\nclass RecordNotFoundError(Exception):\nclass RecordAmbiguousError(Exception):"
}
] | from abc import ABCMeta, abstractmethod
from functools import wraps
from pywinauto.timings import Timings
from pywinauto.application import AppStartError
from ..client.baseclient import BaseClientMeta
from ..prompt import PromptManager
from ..engine.baseengine import BaseEngine
from ..model.basemodel import BaseModel
from ..logger import logger
from ..error import ClientConfigError, TimeoutError | 4,664 | # 险,开源软件提供者或插件提供者均不承担任何责任。同时,无论是直接的、间接的、偶然的、潜在的因使用该软件所造成的账号安全
# 损失、数据安全损失、账户资产损失或其他任何责任事故,开源软件提供者或插件提供者均不承担任何责任。请不要将该软件应用于商
# 业活动,否则由于把该软件应用于商业活动所造成的一切损失或法律责任,开源软件提供者或插件提供者均不承担任何责任。
#
# 修改日志:
# 2022-08-20 第一次编写
#
"""
模板就象是汽车的总装车间,模板基类用来完成交易模板的基础行为,模板只用来定义功能而不实现功能,功能的实现应有交易模型(model)完成。
"""
class BaseTemplateMeta(ABCMeta):
"""交易模板元类"""
templates = {}
def __init__(cls, name, bases, attrs):
super(BaseTemplateMeta, cls).__init__(name, bases, attrs)
if name != 'BaseTemplate':
BaseTemplateMeta.templates[attrs['name']] = cls
def __call__(cls, client=None, user=None, psw=None, second=None, **account):
client = BaseClientMeta.clients[-1] if client is None else client
client.user = user if user is not None else client.user
client.psw = psw if psw is not None else client.psw
client.second = second if second is not None else client.second
client.account.update(account)
return super(BaseTemplateMeta, cls).__call__(client)
class BaseTemplate(metaclass=BaseTemplateMeta):
"""
交易模板的基类,有4个功能在其子类中必须有定义,分别是buy(买入)、sell(卖出)、cancel(撤单)、query(查询),任何在子类中定义
的功能都必须添加@BaseTemplate.connect修饰器才能正常工作。在子类中self._client用于访问客户端,self._prompt用于访问弹窗管理
器,模板基类是唯一对外接口,外部访问时使用Trader()访问,下面是在您的项目中的访问方法:
"""
name = '' # 交易模板的名称
def __new__(cls, client):
return object.__new__(BaseTemplateMeta.templates[client.tradetemplate])
def __init__(self, client):
self._client = client
self._prompt = PromptManager(client)
getattr(Timings, client.TRADE_SPEED_MODE)()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# if exc_type is not None:
# logger.error(''.join(traceback.format_exception(exc_type, exc_val, exc_tb)))
self.close()
def close(self):
self._prompt.stop_monitor()
self._client.close()
def __login(self):
if self._client.window() is self._client.loginwindow:
# 用户未登录
BaseEngine(self._client).login()
self._client.mainwindow.wait('ready', timeout=15) # 等待交易主窗口准备好
self._prompt.start_monitor(delay=5) # 关闭自动弹出的提示框
BaseModel.model_object.pop(self._client.key, None) # 建立新对象
self._model = BaseModel(self._client)
else:
# 用户已登录
self._model = BaseModel(self._client)
self._model.initialization() # 初始化交易窗口
self._model.reset()
def __hook(self):
self._client.hook()
def __active(self):
self._client.active()
def __setapp(self):
try:
self._client.connect()
except (AppStartError, TimeoutError):
raise ClientConfigError(f'无法启动客户端,可能路径拼写错误:{self._client.path}')
def __unlock(self):
"""软件的自动化依赖电脑在登录的情况下"""
# if win32gui.GetForegroundWindow() == 0:
# raise ScreenLockedError('屏幕被锁定') # 操作系统限制,无法用软件解锁电脑
# return self
pass
def __connect(self):
# 1.电脑屏幕是否被锁定
self.__unlock()
# 2.启动应用程序
self.__setapp()
# 3.激活应用程序
self.__active()
# 4.调用钩子
self.__hook()
# 5.登录应用程序
self.__login()
@staticmethod
def connect(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
self.__connect()
return True, func(self, *args, **kwargs)
except Exception as err:
| #
# 券商客户端自动化测试库
# Copyright (C) 2023 谁的谁([email protected]) All rights reserved.
#
# 模块功能:设计总体架构的模板规划
# 建立日期:2023.08.20
# 联系方式:谁的谁([email protected])
#
# 开源软件声明:
# 本软件遵守“MIT License”开源协议开源,仅供学习和参考。您可以自由使用或修改源代码或二进制文件,但必须保留上述版权声明。
# 该软件旨在深度学习和挖掘python pywinauto库的功能和潜力,由于环境的不确定性和该软件的不可靠性,请不要将该软件应用于
# 实盘交易。如您确需量化交易实盘功能,请使用券商提供的量化交易平台,否则由于您使用该软件实盘交易所造成的账户损失或政策风
# 险,开源软件提供者或插件提供者均不承担任何责任。同时,无论是直接的、间接的、偶然的、潜在的因使用该软件所造成的账号安全
# 损失、数据安全损失、账户资产损失或其他任何责任事故,开源软件提供者或插件提供者均不承担任何责任。请不要将该软件应用于商
# 业活动,否则由于把该软件应用于商业活动所造成的一切损失或法律责任,开源软件提供者或插件提供者均不承担任何责任。
#
# 修改日志:
# 2022-08-20 第一次编写
#
"""
模板就象是汽车的总装车间,模板基类用来完成交易模板的基础行为,模板只用来定义功能而不实现功能,功能的实现应有交易模型(model)完成。
"""
class BaseTemplateMeta(ABCMeta):
"""交易模板元类"""
templates = {}
def __init__(cls, name, bases, attrs):
super(BaseTemplateMeta, cls).__init__(name, bases, attrs)
if name != 'BaseTemplate':
BaseTemplateMeta.templates[attrs['name']] = cls
def __call__(cls, client=None, user=None, psw=None, second=None, **account):
client = BaseClientMeta.clients[-1] if client is None else client
client.user = user if user is not None else client.user
client.psw = psw if psw is not None else client.psw
client.second = second if second is not None else client.second
client.account.update(account)
return super(BaseTemplateMeta, cls).__call__(client)
class BaseTemplate(metaclass=BaseTemplateMeta):
"""
交易模板的基类,有4个功能在其子类中必须有定义,分别是buy(买入)、sell(卖出)、cancel(撤单)、query(查询),任何在子类中定义
的功能都必须添加@BaseTemplate.connect修饰器才能正常工作。在子类中self._client用于访问客户端,self._prompt用于访问弹窗管理
器,模板基类是唯一对外接口,外部访问时使用Trader()访问,下面是在您的项目中的访问方法:
"""
name = '' # 交易模板的名称
def __new__(cls, client):
return object.__new__(BaseTemplateMeta.templates[client.tradetemplate])
def __init__(self, client):
self._client = client
self._prompt = PromptManager(client)
getattr(Timings, client.TRADE_SPEED_MODE)()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# if exc_type is not None:
# logger.error(''.join(traceback.format_exception(exc_type, exc_val, exc_tb)))
self.close()
def close(self):
self._prompt.stop_monitor()
self._client.close()
def __login(self):
if self._client.window() is self._client.loginwindow:
# 用户未登录
BaseEngine(self._client).login()
self._client.mainwindow.wait('ready', timeout=15) # 等待交易主窗口准备好
self._prompt.start_monitor(delay=5) # 关闭自动弹出的提示框
BaseModel.model_object.pop(self._client.key, None) # 建立新对象
self._model = BaseModel(self._client)
else:
# 用户已登录
self._model = BaseModel(self._client)
self._model.initialization() # 初始化交易窗口
self._model.reset()
def __hook(self):
self._client.hook()
def __active(self):
self._client.active()
def __setapp(self):
try:
self._client.connect()
except (AppStartError, TimeoutError):
raise ClientConfigError(f'无法启动客户端,可能路径拼写错误:{self._client.path}')
def __unlock(self):
"""软件的自动化依赖电脑在登录的情况下"""
# if win32gui.GetForegroundWindow() == 0:
# raise ScreenLockedError('屏幕被锁定') # 操作系统限制,无法用软件解锁电脑
# return self
pass
def __connect(self):
# 1.电脑屏幕是否被锁定
self.__unlock()
# 2.启动应用程序
self.__setapp()
# 3.激活应用程序
self.__active()
# 4.调用钩子
self.__hook()
# 5.登录应用程序
self.__login()
@staticmethod
def connect(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
self.__connect()
return True, func(self, *args, **kwargs)
except Exception as err: | logger.exception(str(err)) | 4 | 2023-11-03 02:22:34+00:00 | 8k |
humemarx/CPG-LCF | models/backbone2d/hrnet.py | [
{
"identifier": "ConvModule",
"path": "models/networks/backbone.py",
"snippet": "class ConvModule(nn.Module):\r\n \"\"\"A conv block that bundles conv/norm/activation layers.\r\n\r\n This block simplifies the usage of convolution layers, which are commonly\r\n used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).\r\n It is based upon three build methods: `build_conv_layer()`,\r\n `build_norm_layer()` and `build_activation_layer()`.\r\n\r\n Besides, we add some additional features in this module.\r\n 1. Automatically set `bias` of the conv layer.\r\n 2. Spectral norm is supported.\r\n 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only\r\n supports zero and circular padding, and we add \"reflect\" padding mode.\r\n\r\n Args:\r\n in_channels (int): Number of channels in the input feature map.\r\n Same as that in ``nn._ConvNd``.\r\n out_channels (int): Number of channels produced by the convolution.\r\n Same as that in ``nn._ConvNd``.\r\n kernel_size (int | tuple[int]): Size of the convolving kernel.\r\n Same as that in ``nn._ConvNd``.\r\n stride (int | tuple[int]): Stride of the convolution.\r\n Same as that in ``nn._ConvNd``.\r\n padding (int | tuple[int]): Zero-padding added to both sides of\r\n the input. Same as that in ``nn._ConvNd``.\r\n dilation (int | tuple[int]): Spacing between kernel elements.\r\n Same as that in ``nn._ConvNd``.\r\n groups (int): Number of blocked connections from input channels to\r\n output channels. Same as that in ``nn._ConvNd``.\r\n bias (bool | str): If specified as `auto`, it will be decided by the\r\n norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise\r\n False. Default: \"auto\".\r\n conv_cfg (dict): Config dict for convolution layer. Default: None,\r\n which means using conv2d.\r\n norm_cfg (dict): Config dict for normalization layer. Default: None.\r\n act_cfg (dict): Config dict for activation layer.\r\n Default: dict(type='ReLU').\r\n inplace (bool): Whether to use inplace mode for activation.\r\n Default: True.\r\n with_spectral_norm (bool): Whether use spectral norm in conv module.\r\n Default: False.\r\n padding_mode (str): If the `padding_mode` has not been supported by\r\n current `Conv2d` in PyTorch, we will use our own padding layer\r\n instead. Currently, we support ['zeros', 'circular'] with official\r\n implementation and ['reflect'] with our own implementation.\r\n Default: 'zeros'.\r\n order (tuple[str]): The order of conv/norm/activation layers. It is a\r\n sequence of \"conv\", \"norm\" and \"act\". 
Common examples are\r\n (\"conv\", \"norm\", \"act\") and (\"act\", \"conv\", \"norm\").\r\n Default: ('conv', 'norm', 'act').\r\n \"\"\"\r\n\r\n _abbr_ = 'conv_block'\r\n\r\n def __init__(self,\r\n in_channels: int,\r\n out_channels: int,\r\n kernel_size: Union[int, Tuple[int, int]],\r\n stride: Union[int, Tuple[int, int]] = 1,\r\n padding: Union[int, Tuple[int, int]] = 0,\r\n dilation: Union[int, Tuple[int, int]] = 1,\r\n groups: int = 1,\r\n bias: bool = False,\r\n conv_type=nn.Conv2d,\r\n norm_type=None,\r\n act_type=nn.ReLU,\r\n inplace=True,\r\n order: tuple = ('conv', 'norm', 'act')):\r\n super().__init__()\r\n\r\n self.order = order\r\n self.conv_type = conv_type\r\n self.act_type = act_type\r\n self.norm_type = norm_type\r\n\r\n self.with_norm = norm_type is not None\r\n self.with_activation = act_type is not None\r\n\r\n # build convolution layer\r\n self.conv = conv_type(\r\n in_channels,\r\n out_channels,\r\n kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n dilation=dilation,\r\n groups=groups,\r\n bias=bias)\r\n\r\n # build normalization layers\r\n if self.with_norm:\r\n norm = norm_type(out_channels) # type: ignore\r\n self.norm_name = get_norm_name(norm_type)\r\n self.add_module(self.norm_name, norm)\r\n else:\r\n self.norm_name = None # type: ignore\r\n\r\n if self.with_activation:\r\n self.activate = act_type(inplace=inplace)\r\n self.init_weights()\r\n\r\n @property\r\n def norm(self):\r\n if self.norm_name:\r\n return getattr(self, self.norm_name)\r\n else:\r\n return None\r\n\r\n def init_weights(self):\r\n # 1. It is mainly for customized conv layers with their own\r\n # initialization manners by calling their own ``init_weights()``,\r\n # and we do not want ConvModule to override the initialization.\r\n # 2. For customized conv layers without their own initialization\r\n # manners (that is, they don't have their own ``init_weights()``)\r\n # and PyTorch's conv layers, they will be initialized by\r\n # this method with default ``kaiming_init``.\r\n # Note: For PyTorch's conv layers, they will be overwritten by our\r\n # initialization implementation using default ``kaiming_init``.\r\n if not hasattr(self.conv, 'init_weights'):\r\n if self.with_activation and isinstance(self.act_type, nn.LeakyReLU):\r\n nonlinearity = 'leaky_relu'\r\n a = 0.01\r\n else:\r\n nonlinearity = 'relu'\r\n a = 0\r\n kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)\r\n if self.with_norm:\r\n constant_init(self.bn, 1, bias=0)\r\n\r\n def forward(self,x):\r\n for layer in self.order:\r\n if layer == 'conv':\r\n x = self.conv(x)\r\n elif layer == 'norm' and self.with_norm:\r\n x = self.norm(x)\r\n elif layer == 'act' and self.with_activation:\r\n x = self.activate(x)\r\n return x\r"
},
{
"identifier": "BasicBlock",
"path": "models/backbone2d/resnet.py",
"snippet": "class BasicBlock(nn.Module):\n \"\"\"Basic block for ResNet.\"\"\"\n\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_type=nn.Conv2d,\n norm_type=nn.BatchNorm2d,\n dcn=None,\n plugins=None,\n zero_init_residual=True):\n super().__init__()\n assert dcn is None, 'Not implemented yet.'\n assert plugins is None, 'Not implemented yet.'\n self.zero_init_residual = zero_init_residual\n norm1 = norm_type(planes)\n self.norm1_name = get_norm_name(norm_type, postfix=1)\n norm2 = norm_type(planes)\n self.norm2_name = get_norm_name(norm_type, postfix=2)\n\n self.conv1 = conv_type(\n inplanes,\n planes,\n 3,\n stride=stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = conv_type(\n planes, \n planes, \n 3, \n padding=1, \n bias=False)\n self.add_module(self.norm2_name, norm2)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n self.with_cp = with_cp\n\n @property\n def norm1(self):\n \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n return getattr(self, self.norm2_name)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, nn.GroupNorm) or isinstance(m, _BatchNorm):\n constant_init(m, val=1.0, bias=0.)\n\n if self.zero_init_residual and getattr(m, 'norm2'):\n constant_init(m, val=0.0, bias=0.)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out"
},
{
"identifier": "Bottleneck",
"path": "models/backbone2d/resnet.py",
"snippet": "class Bottleneck(nn.Module):\n \"\"\"Bottleneck block for ResNet.\n\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if it is\n \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_type=nn.Conv2d,\n norm_type=nn.BatchNorm2d,\n dcn=None,\n plugins=None,\n zero_init_residual=True):\n super().__init__()\n assert style in ['pytorch', 'caffe']\n assert dcn is None or issubclass(dcn, _ConvNd)\n self.zero_init_residual = zero_init_residual\n\n self.inplanes = inplanes\n self.planes = planes\n self.stride = stride\n self.dilation = dilation\n self.style = style\n self.with_cp = with_cp\n self.conv_type = conv_type\n self.norm_type = norm_type\n self.dcn = dcn\n self.with_dcn = dcn is not None\n\n if self.style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = stride\n else:\n self.conv1_stride = stride\n self.conv2_stride = 1\n\n norm1 = norm_type(planes)\n self.norm1_name = get_norm_name(norm_type, postfix=1)\n norm2 = norm_type(planes)\n self.norm2_name = get_norm_name(norm_type, postfix=2)\n norm3 = norm_type(planes*self.expansion)\n self.norm3_name = get_norm_name(norm_type, postfix=3)\n\n self.conv1 = conv_type(\n inplanes,\n planes,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n fallback_on_stride = False\n if self.with_dcn:\n fallback_on_stride = dcn.pop('fallback_on_stride', False)\n if not self.with_dcn or fallback_on_stride:\n self.conv2 = conv_type(\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n else:\n assert self.conv_type is None, 'conv_cfg must be None for DCN'\n self.conv2 = dcn(\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n\n self.add_module(self.norm2_name, norm2)\n self.conv3 = conv_type(\n planes,\n planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n\n @property\n def norm1(self):\n \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n return getattr(self, self.norm2_name)\n\n @property\n def norm3(self):\n \"\"\"nn.Module: normalization layer after the third convolution layer\"\"\"\n return getattr(self, self.norm3_name)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, nn.GroupNorm) or isinstance(m, _BatchNorm):\n constant_init(m, val=1.0, bias=0.)\n\n if self.zero_init_residual and getattr(m, 'norm3'):\n constant_init(m, val=0.0, bias=0.)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.norm3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out"
},
{
"identifier": "get_norm_name",
"path": "models/backbone2d/resnet.py",
"snippet": "def get_norm_name(norm_type, postfix=1):\n if issubclass(norm_type, _InstanceNorm): # IN is a subclass of BN\n return 'in{}'.format(postfix)\n elif issubclass(norm_type, _BatchNorm):\n return 'bn{}'.format(postfix)\n elif issubclass(norm_type, nn.GroupNorm):\n return 'gn{}'.format(postfix)\n elif issubclass(norm_type, nn.LayerNorm):\n return 'ln{}'.format(postfix)"
},
{
"identifier": "get_module",
"path": "utils/config_parser.py",
"snippet": "def get_module(config=None, *args, **kwargs):\n import models\n import datasets\n if config != None:\n if type(config) != dict:\n config = class2dic(config)\n \n for key in config:\n kwargs[key] = config[key]\n \n assert 'type' in kwargs\n method_code = eval(kwargs['type'])\n\n args_count = method_code.__init__.__code__.co_argcount\n input_params = method_code.__init__.__code__.co_varnames[1:args_count]\n\n new_kwargs = {}\n for i, value in enumerate(args):\n new_kwargs[input_params[i]] = value\n \n for key in kwargs:\n if key in input_params:\n new_kwargs[key] = kwargs[key]\n \n result_module = method_code(**new_kwargs)\n return result_module"
},
{
"identifier": "Upsample",
"path": "models/utils/wrappers.py",
"snippet": "class Upsample(nn.Module):\n\n def __init__(self,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None):\n super().__init__()\n self.size = size\n if isinstance(scale_factor, tuple):\n self.scale_factor = tuple(float(factor) for factor in scale_factor)\n else:\n self.scale_factor = float(scale_factor) if scale_factor else None\n self.mode = mode\n self.align_corners = align_corners\n\n def forward(self, x):\n if not self.size:\n size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n else:\n size = self.size\n return resize(x, size, None, self.mode, self.align_corners)"
},
{
"identifier": "resize",
"path": "models/utils/wrappers.py",
"snippet": "def resize(input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None,\n warning=True):\n if warning:\n if size is not None and align_corners:\n input_h, input_w = tuple(int(x) for x in input.shape[2:])\n output_h, output_w = tuple(int(x) for x in size)\n if output_h > input_h or output_w > output_h:\n if ((output_h > 1 and output_w > 1 and input_h > 1\n and input_w > 1) and (output_h - 1) % (input_h - 1)\n and (output_w - 1) % (input_w - 1)):\n warnings.warn(\n f'When align_corners={align_corners}, '\n 'the output would more aligned if '\n f'input size {(input_h, input_w)} is `x+1` and '\n f'out size {(output_h, output_w)} is `nx+1`')\n return F.interpolate(input, size, scale_factor, mode, align_corners)"
}
] | import warnings
import torch.nn as nn
import torch
from models.networks.backbone import ConvModule
from models.backbone2d.resnet import BasicBlock, Bottleneck, get_norm_name
from utils.config_parser import get_module
from models.utils import resize, Upsample
from torch.nn.modules.batchnorm import _BatchNorm
from collections import OrderedDict | 6,408 | ConvModule(
in_channels[j],
in_channels[j],
kernel_size=3,
stride=2,
padding=1,
bias=False,
conv_type=self.conv_type),
self.norm_type(in_channels[j]),
nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def forward(self, x):
"""Forward function."""
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = 0
for j in range(self.num_branches):
if i == j:
y += x[j]
elif j > i:
y = y + resize(
self.fuse_layers[i][j](x[j]),
size=x[i].shape[2:],
mode='bilinear',
align_corners=False)
else:
y += self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
class HRNet(nn.Module):
"""HRNet backbone.
This backbone is the implementation of `High-Resolution Representations
for Labeling Pixels and Regions <https://arxiv.org/abs/1904.04514>`_.
Args:
extra (dict): Detailed configuration for each stage of HRNet.
There must be 4 stages, the configuration for each stage must have
5 keys:
- num_modules (int): The number of HRModule in this stage.
- num_branches (int): The number of branches in the HRModule.
- block (str): The type of convolution block.
- num_blocks (tuple): The number of blocks in each branch.
The length must be equal to num_branches.
- num_channels (tuple): The number of channels in each branch.
The length must be equal to num_branches.
in_channels (int): Number of input image channels. Normally 3.
conv_cfg (dict): Dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Use `BN` by default.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: False.
multiscale_output (bool): Whether to output multi-level features
produced by multiple branches. If False, only the first level
feature will be output. Default: True.
pretrained (str, optional): Model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmseg.models import HRNet
>>> import torch
>>> extra = dict(
>>> stage1=dict(
>>> num_modules=1,
>>> num_branches=1,
>>> block='BOTTLENECK',
>>> num_blocks=(4, ),
>>> num_channels=(64, )),
>>> stage2=dict(
>>> num_modules=1,
>>> num_branches=2,
>>> block='BASIC',
>>> num_blocks=(4, 4),
>>> num_channels=(32, 64)),
>>> stage3=dict(
>>> num_modules=4,
>>> num_branches=3,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4),
>>> num_channels=(32, 64, 128)),
>>> stage4=dict(
>>> num_modules=3,
>>> num_branches=4,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4, 4),
>>> num_channels=(32, 64, 128, 256)))
>>> self = HRNet(extra, in_channels=1)
>>> self.eval()
>>> inputs = torch.rand(1, 1, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 32, 8, 8)
(1, 64, 4, 4)
(1, 128, 2, 2)
(1, 256, 1, 1)
"""
| # Copyright (c) OpenMMLab. All rights reserved.
class HRModule(nn.Module):
"""High-Resolution Module for HRNet.
In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
is in this module.
"""
def __init__(self,
num_branches,
blocks,
num_blocks,
in_channels,
num_channels,
multiscale_output=True,
with_cp=False,
conv_type=nn.Conv2d,
norm_type=nn.BatchNorm2d):
super().__init__()
self._check_branches(num_branches, num_blocks, in_channels,
num_channels)
self.in_channels = in_channels
self.num_branches = num_branches
self.multiscale_output = multiscale_output
self.norm_type = norm_type
self.conv_type = conv_type
self.with_cp = with_cp
self.branches = self._make_branches(num_branches, blocks, num_blocks,
num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=False)
def _check_branches(self, num_branches, num_blocks, in_channels,
num_channels):
"""Check branches configuration."""
if num_branches != len(num_blocks):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS(' \
f'{len(num_blocks)})'
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS(' \
f'{len(num_channels)})'
raise ValueError(error_msg)
if num_branches != len(in_channels):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS(' \
f'{len(in_channels)})'
raise ValueError(error_msg)
def _make_one_branch(self,
branch_index,
block,
num_blocks,
num_channels,
stride=1):
"""Build one branch."""
downsample = None
if stride != 1 or \
self.in_channels[branch_index] != \
num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
ConvModule(
self.in_channels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
conv_type=self.conv_type),
self.norm_type(num_channels[branch_index] * block.expansion))
layers = []
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_type=self.norm_type,
conv_type=self.conv_type))
self.in_channels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
with_cp=self.with_cp,
norm_type=self.norm_type,
conv_type=self.conv_type))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
"""Build multiple branch."""
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
"""Build fuse layer."""
if self.num_branches == 1:
return None
num_branches = self.num_branches
in_channels = self.in_channels
fuse_layers = []
num_out_branches = num_branches if self.multiscale_output else 1
for i in range(num_out_branches):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(
nn.Sequential(
ConvModule(
in_channels[j],
in_channels[i],
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_type=self.conv_type),
self.norm_type(in_channels[i]),
# we set align_corners=False for HRNet
Upsample(
scale_factor=2**(j - i),
mode='bilinear',
align_corners=False)))
elif j == i:
fuse_layer.append(None)
else:
conv_downsamples = []
for k in range(i - j):
if k == i - j - 1:
conv_downsamples.append(
nn.Sequential(
ConvModule(
in_channels[j],
in_channels[i],
kernel_size=3,
stride=2,
padding=1,
bias=False,
conv_type=self.conv_type),
self.norm_type(in_channels[i])))
else:
conv_downsamples.append(
nn.Sequential(
ConvModule(
in_channels[j],
in_channels[j],
kernel_size=3,
stride=2,
padding=1,
bias=False,
conv_type=self.conv_type),
self.norm_type(in_channels[j]),
nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def forward(self, x):
"""Forward function."""
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = 0
for j in range(self.num_branches):
if i == j:
y += x[j]
elif j > i:
y = y + resize(
self.fuse_layers[i][j](x[j]),
size=x[i].shape[2:],
mode='bilinear',
align_corners=False)
else:
y += self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
class HRNet(nn.Module):
"""HRNet backbone.
This backbone is the implementation of `High-Resolution Representations
for Labeling Pixels and Regions <https://arxiv.org/abs/1904.04514>`_.
Args:
extra (dict): Detailed configuration for each stage of HRNet.
There must be 4 stages, the configuration for each stage must have
5 keys:
- num_modules (int): The number of HRModule in this stage.
- num_branches (int): The number of branches in the HRModule.
- block (str): The type of convolution block.
- num_blocks (tuple): The number of blocks in each branch.
The length must be equal to num_branches.
- num_channels (tuple): The number of channels in each branch.
The length must be equal to num_branches.
in_channels (int): Number of input image channels. Normally 3.
conv_cfg (dict): Dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Use `BN` by default.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: False.
multiscale_output (bool): Whether to output multi-level features
produced by multiple branches. If False, only the first level
feature will be output. Default: True.
pretrained (str, optional): Model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmseg.models import HRNet
>>> import torch
>>> extra = dict(
>>> stage1=dict(
>>> num_modules=1,
>>> num_branches=1,
>>> block='BOTTLENECK',
>>> num_blocks=(4, ),
>>> num_channels=(64, )),
>>> stage2=dict(
>>> num_modules=1,
>>> num_branches=2,
>>> block='BASIC',
>>> num_blocks=(4, 4),
>>> num_channels=(32, 64)),
>>> stage3=dict(
>>> num_modules=4,
>>> num_branches=3,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4),
>>> num_channels=(32, 64, 128)),
>>> stage4=dict(
>>> num_modules=3,
>>> num_branches=4,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4, 4),
>>> num_channels=(32, 64, 128, 256)))
>>> self = HRNet(extra, in_channels=1)
>>> self.eval()
>>> inputs = torch.rand(1, 1, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 32, 8, 8)
(1, 64, 4, 4)
(1, 128, 2, 2)
(1, 256, 1, 1)
"""
| blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} | 1 | 2023-11-02 09:50:13+00:00 | 8k |
lalalamdbf/PLSE_IDRR | src/prompt-tuning/prompt/prompt_base.py | [
{
"identifier": "InputFeatures",
"path": "src/prompt-tuning/prompt/data_utils.py",
"snippet": "class InputFeatures(dict):\n \"\"\"\n The class for input to the PLM and Prompts. To make users explicitly know the available keys,\n we define a dict with a set of predefined possible keys. The default value to any key is None.\n When use it as a dict, all the keys whose values are None are invisible.\n\n This class support most of the dict's operation (See Examples). It can also be consumed by\n pytorch's default_collate in DataLoader.\n Also a :py:meth:`to_tensor()` method is build to convert the values into torch.Tensor for torch's input.\n\n Examples:\n\n .. code-block:: python\n\n in_feat = InputFeatures(**{'input_ids':[1,4,5], 'soft_token_ids': [3,4,5]}) # init from dict\n print(in_feat.keys()) # ['input_ids, 'soft_token_ids']\n in_feat['label'] = 3 # can assign value like normal dict\n print(in_feat.keys()) # ['input_ids','label', 'soft_token_ids'] (Note that it's also ordered)\n print(in_feat['label']) # 3\n in_feat['alice'] = 0 # KeyError: Key alice not in predefined set of keys\n in_feat.values() # [[1,4,5], 3, [3,4,5]] (Note that it's also ordered)\n [in_feat[key] for key in in_feat] # [[1,4,5], 3, [3,4,5]]\n new_dict= {**in_feat, 'new_key':2} # new_dict is {'input_ids': [1, 4, 5], 'label': 3, 'soft_token_ids': [3, 4, 5], 'new_key': 2}\n\n Args:\n input_ids: Indices of input sequence tokens in the vocabulary.\n attention_mask: Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded)\n tokens.\n token_type_ids: (Optional) Segment token indices to indicate first and second\n portions of the inputs. Only some models use them.\n label: (Optional) Label corresponding to the input. Int for classification problems,\n float for regression problems.\n \"\"\"\n tensorable_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',\n 'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',\n 'past_key_values', 'loss_ids','conns_index']\n all_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',\n 'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',\n 'past_key_values', 'loss_ids','guid', 'tgt_text', 'encoded_tgt_text', 'input_ids_len','conns_index']\n non_tensorable_keys = []\n\n def __init__(self,\n input_ids: Optional[Union[List, torch.Tensor]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n attention_mask: Optional[Union[List[int], torch.Tensor]] = None,\n token_type_ids: Optional[Union[List[int], torch.Tensor]] = None,\n label: Optional[Union[int, torch.Tensor]] = None,\n decoder_input_ids: Optional[Union[List, torch.Tensor]] = None,\n decoder_inputs_embeds: Optional[torch.Tensor] = None,\n soft_token_ids: Optional[Union[List, torch.Tensor]] = None,\n past_key_values: Optional[torch.Tensor] = None, # for prefix_tuning\n loss_ids: Optional[Union[List, torch.Tensor]] = None,\n guid: Optional[str] = None,\n tgt_text: Optional[str] = None,\n use_cache: Optional[bool] = None,\n encoded_tgt_text: Optional[str] = None,\n input_ids_len: Optional[int] = None,\n conns_index = None,\n **kwargs):\n\n self.input_ids = input_ids\n self.inputs_embeds = inputs_embeds\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.label = label\n self.decoder_input_ids = decoder_input_ids\n self.decoder_inputs_embeds = decoder_inputs_embeds\n self.soft_token_ids = soft_token_ids\n self.past_key_values = past_key_values\n self.loss_ids = loss_ids\n self.guid = guid\n 
self.tgt_text = tgt_text\n self.encoded_tgt_text = encoded_tgt_text\n self.use_cache = use_cache\n self.input_ids_len = input_ids_len\n self.conns_index = conns_index\n\n for k in kwargs.keys():\n setattr(self, k, kwargs[k])\n\n @classmethod\n def add_tensorable_keys(cls, *args):\n cls.tensorable_keys.extend(args)\n\n @classmethod\n def add_not_tensorable_keys(cls, *args):\n cls.not_tensorable_keys.extend(args)\n\n @classmethod\n def add_keys(cls, *args):\n cls.all_keys.extend(args)\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def __len__(self):\n return len(self.keys())\n\n def to_tensor(self, device: str = 'cuda'):\n \"\"\"inplace operation, convert all tensorable features into :obj:`torch.tensor`\"\"\"\n for key in self.tensorable_keys:\n value = getattr(self, key)\n if value is not None:\n setattr(self, key, torch.tensor(value))\n return self\n\n def to(self, device: str = \"cuda:0\"):\n r\"\"\"move the tensor keys to runtime device, such as gpu:0\n \"\"\"\n for key in self.tensorable_keys:\n value = getattr(self, key)\n if value is not None:\n setattr(self, key, value.to(device))\n return self\n\n def cuda(self, device: str = \"cuda:0\"):\n r\"\"\"mimic the tensor behavior\n \"\"\"\n return self.to(device)\n\n def to_json_string(self, keep_none=False):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n data = {}\n for key in self.all_keys:\n value = getattr(self, key)\n if isinstance(value, torch.Tensor):\n data[key] = value.detach().cpu().tolist()\n elif value is None and keep_none:\n data[key] = None\n else:\n data[key] = value\n return json.dumps(data) + \"\\n\"\n\n def keys(self, keep_none=False) -> List[str]:\n \"\"\"get all keys of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`List[str]`: keys of the InputFeatures\n \"\"\"\n if keep_none:\n return self.all_keys\n else:\n return [key for key in self.all_keys if getattr(self, key) is not None]\n\n def to_dict(self, keep_none=False) -> Dict[str, Any]:\n \"\"\"get the dict of mapping from keys to values of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`Dict[str, Any]`: dict of mapping from keys to values of the InputFeatures\n \"\"\"\n data = {}\n for key in self.all_keys:\n value = getattr(self, key)\n if value is not None:\n data[key] = value\n elif value is None and keep_none:\n data[key] = None\n return data\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __iter__(self):\n return iter(self.keys())\n\n def __setitem__(self, key, item):\n if key not in self.all_keys:\n raise KeyError(\"Key {} not in predefined set of keys\".format(key))\n setattr(self, key, item)\n\n def values(self, keep_none=False) -> List[Any]:\n \"\"\"get the values with respect to the keys of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`List[Any]`: the values with respect to the keys of the InputFeatures\n \"\"\"\n return [getattr(self, key) for key in self.keys(keep_none=keep_none)]\n\n def __contains__(self, key, keep_none=False):\n return key in self.keys(keep_none)\n\n def items(self,):\n \"\"\"get the (key, value) pairs of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. 
Defaults to False.\n\n Returns:\n :obj:`List[Any]`: the (key, value) pairs of the InputFeatures\n \"\"\"\n return [(key, self.__getitem__(key)) for key in self.keys()]\n\n @staticmethod\n def collate_fct(batch: List):\n r'''\n This function is used to collate the input_features.\n\n Args:\n batch (:obj:`List[Union[Dict, InputFeatures]]`): A batch of the current data.\n\n Returns:\n :obj:`InputFeatures`: Return the :py:class:`~openprompt.data_utils.data_utils.InputFeatures of the current batch of data.\n '''\n\n\n elem = batch[0]\n return_dict = {}\n for key in elem:\n if key == \"encoded_tgt_text\":\n return_dict[key] = [d[key] for d in batch]\n else:\n try:\n return_dict[key] = default_collate([d[key] for d in batch])\n except:\n print(f\"key{key}\\n d {[batch[i][key] for i in range(len(batch))]} \")\n\n return InputFeatures(**return_dict)"
},
{
"identifier": "InputExample",
"path": "src/prompt-tuning/prompt/data_utils.py",
"snippet": "class InputExample(object):\n \"\"\"A raw input example consisting of segments of text,\n a label for classification task or a target sequence of generation task.\n Other desired information can be passed via meta.\n\n Args:\n guid (:obj:`str`, optional): A unique identifier of the example.\n text_a (:obj:`str`, optional): The placeholder for sequence of text.\n text_b (:obj:`str`, optional): A secend sequence of text, which is not always necessary.\n label (:obj:`int`, optional): The label id of the example in classification task.\n tgt_text (:obj:`Union[str,List[str]]`, optional): The target sequence of the example in a generation task..\n meta (:obj:`Dict`, optional): An optional dictionary to store arbitrary extra information for the example.\n \"\"\"\n\n def __init__(self,\n guid = None,\n text_a = \"\",\n text_b = \"\",\n label = None,\n meta: Optional[Dict] = None,\n tgt_text: Optional[Union[str,List[str]]] = None\n ):\n\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n self.meta = meta if meta else {}\n self.tgt_text = tgt_text\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n r\"\"\"Serialize this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n r\"\"\"Serialize this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def keys(self, keep_none=False):\n return [key for key in self.__dict__.keys() if getattr(self, key) is not None]\n\n @staticmethod\n def load_examples(path: str) -> List['InputExample']:\n \"\"\"Load a set of input examples from a file\"\"\"\n with open(path, 'rb') as fh:\n return pickle.load(fh)\n\n @staticmethod\n def save_examples(examples: List['InputExample'], path: str) -> None:\n \"\"\"Save a set of input examples to a file\"\"\"\n with open(path, 'wb') as fh:\n pickle.dump(examples, fh)"
}
] | from abc import abstractmethod
from transformers.file_utils import ModelOutput
from transformers.utils.dummy_pt_objects import PreTrainedModel
from .data_utils import InputFeatures, InputExample
from typing import *
from transformers.tokenization_utils import PreTrainedTokenizer
import json
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import traceback | 3,763 |
class Template(nn.Module):
r'''
Base class for all the templates.
Most of the methods are abstract, with some exceptions that hold the common methods for all templates, such as ``loss_ids``, ``save``, ``load``.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.
placeholder_mapping (:obj:`dict`): A mapping from placeholder tokens (e.g. ``<text_a>``) to the corresponding fields of the original input text.
'''
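# Names of the per-token flag lists (such as loss_ids) that the template attaches to every tokenized example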
registered_inputflag_names = ["loss_ids", "shortenable_ids"]
def __init__(self,
tokenizer: PreTrainedTokenizer,
placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},
):
super().__init__()
self.tokenizer = tokenizer
self.placeholder_mapping = placeholder_mapping
self._in_on_text_set = False
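# Delimiters of the JSON-like special segments in the mixed template text, e.g. {"mask"}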
self.mixed_token_start = "{"
self.mixed_token_end = "}"
def get_default_loss_ids(self) -> List[int]:
'''Get the loss indices for the template using mask.
e.g. when self.text is ``'{"placeholder": "text_a"}. {"meta": "word"} is {"mask"}.'``,
output is ``[0, 0, 0, 0, 1, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]:
- 1 for a masked token.
- 0 for a sequence token.
'''
return [1 if 'mask' in d else 0 for d in self.text]
def get_default_shortenable_ids(self) -> List[int]:
"""Every template needs shortenable_ids, denoting which part of the template can be truncate to fit
the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other
special tokens are not shortenable.
e.g. when self.text is ``'{"placeholder": "text_a"} {"placeholder": "text_b", "shortenable": False} {"meta": "word"} is {"mask"}.'``,
output is ``[1, 0, 0, 0, 0, 0, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range ``[0, 1]``:
- 1 for the input tokens.
- 0 for the template sequence tokens.
"""
idx = []
for d in self.text:
if 'shortenable' in d:
idx.append(1 if d['shortenable'] else 0)
else:
idx.append(1 if 'placeholder' in d else 0)
return idx
def get_default_soft_token_ids(self) -> List[int]:
r'''
This function identifies which tokens are soft tokens.
Sometimes tokens in the template are not from the vocabulary,
but a sequence of soft tokens.
In this case, you need to implement this function
Raises:
NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.
'''
raise NotImplementedError
def incorporate_text_example(self,
|
class Template(nn.Module):
r'''
Base class for all the templates.
Most of the methods are abstract, with some exceptions that hold the common methods for all templates, such as ``loss_ids``, ``save``, ``load``.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.
placeholder_mapping (:obj:`dict`): A mapping from placeholder tokens (e.g. ``<text_a>``) to the corresponding fields of the original input text.
'''
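# Names of the per-token flag lists (such as loss_ids) that the template attaches to every tokenized example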
registered_inputflag_names = ["loss_ids", "shortenable_ids"]
def __init__(self,
tokenizer: PreTrainedTokenizer,
placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},
):
super().__init__()
self.tokenizer = tokenizer
self.placeholder_mapping = placeholder_mapping
self._in_on_text_set = False
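# Delimiters of the JSON-like special segments in the mixed template text, e.g. {"mask"}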
self.mixed_token_start = "{"
self.mixed_token_end = "}"
def get_default_loss_ids(self) -> List[int]:
'''Get the loss indices for the template using mask.
e.g. when self.text is ``'{"placeholder": "text_a"}. {"meta": "word"} is {"mask"}.'``,
output is ``[0, 0, 0, 0, 1, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]:
- 1 for a masked token.
- 0 for a sequence token.
'''
return [1 if 'mask' in d else 0 for d in self.text]
def get_default_shortenable_ids(self) -> List[int]:
"""Every template needs shortenable_ids, denoting which part of the template can be truncate to fit
the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other
special tokens are not shortenable.
e.g. when self.text is ``'{"placeholder": "text_a"} {"placeholder": "text_b", "shortenable": False} {"meta": "word"} is {"mask"}.'``,
output is ``[1, 0, 0, 0, 0, 0, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range ``[0, 1]``:
- 1 for the input tokens.
- 0 for the template sequence tokens.
"""
idx = []
for d in self.text:
if 'shortenable' in d:
idx.append(1 if d['shortenable'] else 0)
else:
idx.append(1 if 'placeholder' in d else 0)
return idx
def get_default_soft_token_ids(self) -> List[int]:
r'''
This function identifies which tokens are soft tokens.
Sometimes tokens in the template are not from the vocabulary,
but a sequence of soft tokens.
In this case, you need to implement this function
Raises:
NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.
'''
raise NotImplementedError
def incorporate_text_example(self, | example: InputExample, | 1 | 2023-11-01 08:52:36+00:00 | 8k |
JakubPluta/gymhero | gymhero/api/routes/training_unit.py | [
{
"identifier": "get_current_active_user",
"path": "gymhero/api/dependencies.py",
"snippet": "def get_current_active_user(\n current_user: User = Depends(get_current_user),\n) -> User:\n \"\"\"Returns the current active user.\n\n Parameters:\n current_user (User, optional): The current user.\n\n Returns:\n User: The current active user.\n\n Raises:\n HTTPException: If the user is not active\n\n \"\"\"\n if not user_crud.is_active_user(current_user):\n raise _get_credential_exception(\n status_code=status.HTTP_400_BAD_REQUEST,\n details=\"Inactive user\",\n )\n return current_user"
},
{
"identifier": "get_current_superuser",
"path": "gymhero/api/dependencies.py",
"snippet": "def get_current_superuser(\n current_user: User = Depends(get_current_user),\n) -> User:\n \"\"\"Returns the current superuser.\n\n Parameters:\n current_user (User, optional): The current user.\n\n Returns:\n User: The current superuser.\n\n Raises:\n HTTPException: If the current user is not a super user.\n\n \"\"\"\n if not user_crud.is_super_user(current_user):\n raise _get_credential_exception(\n status_code=status.HTTP_403_FORBIDDEN,\n details=\"The user does not have enough privileges\",\n )\n return current_user"
},
{
"identifier": "get_pagination_params",
"path": "gymhero/api/dependencies.py",
"snippet": "def get_pagination_params(\n skip: int = Query(0, ge=0), limit: int = Query(10, gt=0)\n) -> Tuple[int, int]:\n \"\"\"\n Get the pagination parameters.\n\n Parameters:\n skip (int): The number of items to skip. Defaults to 0.\n limit (int): The maximum number of items to return. Defaults to 10.\n\n Returns:\n Tuple[int, int]: A tuple containing the skip and limit values.\n \"\"\"\n return skip, limit"
},
{
"identifier": "exercise_crud",
"path": "gymhero/crud/exercise.py",
"snippet": ""
},
{
"identifier": "training_unit_crud",
"path": "gymhero/crud/training_unit.py",
"snippet": "class TrainingUnitCRUD(CRUDRepository):\n def add_exercise_to_training_unit(\n self, db: Session, training_unit: TrainingUnit, exercise: Exercise\n ):\n def remove_exercise_from_training_unit(\n self, db: Session, training_unit: TrainingUnit, exercise: Exercise\n ):\n def get_exercises_in_training_unit(\n self, training_unit: TrainingUnit\n ) -> List[Exercise]:\n def check_if_exercise_in_training_unit(\n self, training_unit: TrainingUnit, exercise: Exercise\n ) -> bool:"
},
{
"identifier": "get_db",
"path": "gymhero/database/db.py",
"snippet": "def get_db() -> Generator: # pragma: no cover\n \"\"\"\n Returns a generator that yields a database session\n\n Yields:\n Session: A database session object.\n\n Raises:\n Exception: If an error occurs while getting the database session.\n \"\"\"\n\n log.debug(\"getting database session\")\n db = get_local_session(SQLALCHEMY_DATABASE_URL, False)()\n try:\n yield db\n finally: # pragma: no cover\n log.debug(\"closing database session\")\n db.close() # pragma: no cover"
},
{
"identifier": "get_logger",
"path": "gymhero/log.py",
"snippet": "def get_logger(\n name: Optional[str] = None, level: DebugLevelType = \"DEBUG\"\n) -> logging.Logger:\n \"\"\"\n Creates and configures a logger for logging messages.\n\n Parameters:\n name (Optional[str]): The name of the logger. Defaults to None.\n level (DebugLevel): The logging level. Defaults to DebugLevel.DEBUG.\n\n Returns:\n logging.Logger: The configured logger object.\n \"\"\"\n logger = logging.getLogger(name=name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(LOGGING_FORMATTER)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n if not level or level not in DebugLevels:\n logger.warning(\n \"Invalid logging level %s. Setting logging level to DEBUG.\", level\n )\n level = \"DEBUG\"\n\n logger.setLevel(level=level)\n return logger"
},
{
"identifier": "TrainingUnit",
"path": "gymhero/models/training_unit.py",
"snippet": "class TrainingUnit(Base):\n __tablename__ = \"training_units\"\n\n __table_args__ = (\n # this can be db.PrimaryKeyConstraint if you want it to be a primary key\n UniqueConstraint(\"name\", \"owner_id\"),\n )\n\n id = Column(Integer, primary_key=True)\n name = Column(String, nullable=False)\n description = Column(String, nullable=True)\n created_at = Column(DateTime(timezone=True), server_default=func.now())\n updated_at = Column(\n DateTime(timezone=True), server_default=func.now(), onupdate=func.now()\n )\n owner_id = Column(Integer, ForeignKey(\"users.id\"))\n owner = relationship(\"User\")\n exercises = relationship(\"Exercise\", secondary=training_unit_exercise)\n\n def __repr__(self):\n return f\"TrainingUnit(id={self.id}, name={self.name})\""
},
{
"identifier": "Exercise",
"path": "gymhero/models/exercise.py",
"snippet": "class Exercise(Base):\n __tablename__ = \"exercises\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String, nullable=False, unique=True)\n description = Column(String, nullable=True)\n target_body_part_id = Column(Integer, ForeignKey(\"body_parts.id\"))\n exercise_type_id = Column(Integer, ForeignKey(\"exercise_types.id\"))\n level_id = Column(Integer, ForeignKey(\"levels.id\"))\n owner_id = Column(Integer, ForeignKey(\"users.id\"))\n created_at = Column(DateTime(timezone=True), server_default=func.now())\n updated_at = Column(\n DateTime(timezone=True), server_default=func.now(), onupdate=func.now()\n )\n\n target_body_part = relationship(\"BodyPart\")\n exercise_type = relationship(\"ExerciseType\")\n level = relationship(\"Level\")\n owner = relationship(\"User\")\n\n def __repr__(self):\n return f\"<Exercise(id={self.id}, name={self.name})>\""
},
{
"identifier": "User",
"path": "gymhero/models/user.py",
"snippet": "class User(Base):\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True, index=True)\n full_name = Column(String, index=True)\n email = Column(String, unique=True, index=True, nullable=False)\n hashed_password = Column(String, nullable=False)\n is_active = Column(Boolean, default=True)\n is_superuser = Column(Boolean, default=False)\n\n training_plans = relationship(\"TrainingPlan\", back_populates=\"owner\")\n training_units = relationship(\"TrainingUnit\", back_populates=\"owner\")\n\n def __repr__(self):\n return f\"<User(full_name={self.full_name}, email={self.email})>\""
},
{
"identifier": "ExerciseInDB",
"path": "gymhero/schemas/exercise.py",
"snippet": "class ExerciseInDB(ExerciseBase):\n id: int\n created_at: datetime.datetime\n updated_at: datetime.datetime\n owner_id: int\n model_config = ConfigDict(from_attributes=True)"
},
{
"identifier": "TrainingUnitCreate",
"path": "gymhero/schemas/training_unit.py",
"snippet": "class TrainingUnitCreate(TrainingUnitBase):\n pass"
},
{
"identifier": "TrainingUnitInDB",
"path": "gymhero/schemas/training_unit.py",
"snippet": "class TrainingUnitInDB(TrainingUnitBase):\n id: int\n created_at: datetime.datetime\n updated_at: datetime.datetime\n exercises: Optional[List[ExerciseOut]] = []\n owner_id: int"
},
{
"identifier": "TrainingUnitUpdate",
"path": "gymhero/schemas/training_unit.py",
"snippet": "class TrainingUnitUpdate(TrainingUnitBase):\n pass"
}
] | from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from gymhero.api.dependencies import (
get_current_active_user,
get_current_superuser,
get_pagination_params,
)
from gymhero.crud import exercise_crud, training_unit_crud
from gymhero.database.db import get_db
from gymhero.log import get_logger
from gymhero.models import TrainingUnit
from gymhero.models.exercise import Exercise
from gymhero.models.user import User
from gymhero.schemas.exercise import ExerciseInDB
from gymhero.schemas.training_unit import (
TrainingUnitCreate,
TrainingUnitInDB,
TrainingUnitUpdate,
) | 3,916 | "/{training_unit_id}",
response_model=TrainingUnitInDB,
status_code=status.HTTP_200_OK,
)
def update_training_unit(
training_unit_id: int,
training_unit_update: TrainingUnitUpdate,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Updates a training unit.
Parameters:
training_unit_id (int): The ID of the training unit to update.
training_unit_update (TrainingUnitUpdate): The updated training unit data.
db (Session): The database session.
Returns:
TrainingUnitInDB: The updated training unit.
"""
training_unit: TrainingUnit = training_unit_crud.get_one(
db, TrainingUnit.id == training_unit_id
)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
if training_unit.owner_id != user.id and not user.is_superuser:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="You do not have permission to perform this action",
)
try:
training_unit = training_unit_crud.update(
db, training_unit, training_unit_update
)
except Exception as e: # pragma: no cover
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Could not update training unit. Error: " + str(e),
) from e
return training_unit
@router.delete("/{training_unit_id}", status_code=status.HTTP_200_OK)
def delete_training_unit(
training_unit_id: int,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Deletes a training unit.
Parameters:
training_unit_id (int): The ID of the training unit to delete.
db (Session): The database session.
Returns:
Dict[str, str]: A message indicating that the training unit has been deleted.
"""
training_unit: TrainingUnit = training_unit_crud.get_one(
db, TrainingUnit.id == training_unit_id
)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
if training_unit.owner_id != user.id and not user.is_superuser:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="You do not have permission to perform this action",
)
try:
training_unit_crud.delete(db, training_unit)
except Exception as e: # pragma: no cover
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Could not delete training unit. Error: " + str(e),
) from e # pragma: no cover
return {"detail": f"Training unit type with id {training_unit_id} deleted."}
@router.put(
"/{training_unit_id}/exercises/{exercise_id}/add",
response_model=Optional[TrainingUnitInDB],
status_code=status.HTTP_200_OK,
)
def add_exercise_to_training_unit(
training_unit_id: int,
exercise_id: int,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Adds an exercise to a training unit.
Parameters:
training_unit_id (int): The ID of the training unit.
exercise_id (int): The ID of the exercise.
db (Session, optional): The database session. Defaults to Depends(get_db).
user (User, optional): The current authenticated user.
Defaults to Depends(get_current_active_user).
Returns:
The updated training unit with the added exercise.
"""
training_unit = training_unit_crud.get_one(db, TrainingUnit.id == training_unit_id)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
|
log = get_logger(__name__)
router = APIRouter()
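# Routes below operate on TrainingUnit resources; ownership checks gate updates and deletes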
@router.get(
"/all",
response_model=List[Optional[TrainingUnitInDB]],
status_code=status.HTTP_200_OK,
)
def get_all_training_units(
db: Session = Depends(get_db),
pagination_params: dict = Depends(get_pagination_params),
user: User = Depends(get_current_superuser),
):
"""
Retrieves all training units with pagination.
Parameters:
db (Session): The database session.
pagination_params (dict): The pagination parameters.
Returns:
TrainingUnitsInDB: The training units retrieved from the database.
"""
skip, limit = pagination_params
return training_unit_crud.get_many(db, skip=skip, limit=limit)
@router.get(
"/all/my",
response_model=List[Optional[TrainingUnitInDB]],
status_code=status.HTTP_200_OK,
)
def get_all_training_units_for_owner(
db: Session = Depends(get_db),
pagination_params: dict = Depends(get_pagination_params),
user: User = Depends(get_current_active_user),
):
"""
Retrieves all training units for the current user with pagination.
Parameters:
db (Session): The database session.
pagination_params (dict): The pagination parameters.
user (User): The current active user.
Returns:
TrainingUnitsInDB: The training units retrieved from the database.
"""
skip, limit = pagination_params
return training_unit_crud.get_many_for_owner(
db, owner_id=user.id, skip=skip, limit=limit
)
@router.get(
"/{training_unit_id}",
response_model=Optional[TrainingUnitInDB],
status_code=status.HTTP_200_OK,
)
def get_training_unit_by_id(
training_unit_id: int,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Retrieves a training unit by ID.
Parameters:
training_unit_id (int): The ID of the training unit.
db (Session): The database session.
Returns:
Optional[TrainingUnitInDB]: The training unit retrieved
from the database, or None if not found.
"""
if user.is_superuser:
training_unit = training_unit_crud.get_one(
db, TrainingUnit.id == training_unit_id
)
else:
training_unit = training_unit_crud.get_one(
db, TrainingUnit.id == training_unit_id, owner_id=user.id
)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
return training_unit
@router.get(
"/name/{training_unit_name}",
response_model=Optional[TrainingUnitInDB],
status_code=status.HTTP_200_OK,
)
def get_training_unit_by_name(
training_unit_name: str,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Retrieves a training unit by name.
Parameters:
training_unit_name (str): The name of the training unit.
db (Session): The database session.
Returns:
Optional[TrainingUnitInDB]: The training unit retrieved
from the database, or None if not found.
"""
training_unit = training_unit_crud.get_one(
db, TrainingUnit.name == training_unit_name, owner_id=user.id
)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with name {training_unit_name} not found for user {user.id}",
)
return training_unit
# For superuser
@router.get(
"/name/{training_unit_name}/superuser",
response_model=List[Optional[TrainingUnitInDB]],
status_code=status.HTTP_200_OK,
include_in_schema=False,
)
def get_training_units_by_name(
training_unit_name: str,
db: Session = Depends(get_db),
user: User = Depends(get_current_superuser),
):
"""
Retrieves training units by name.
Parameters:
training_unit_name (str): The name of the training unit.
db (Session): The database session.
Returns:
List[Optional[TrainingUnitInDB]]: The training units retrieved
from the database, or None if not found.
"""
training_unit = training_unit_crud.get_many(
db, TrainingUnit.name == training_unit_name
)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with name {training_unit_name} not found for user {user.id}",
)
return training_unit
@router.post("/", response_model=TrainingUnitInDB, status_code=status.HTTP_201_CREATED)
def create_training_unit(
training_unit_in: TrainingUnitCreate,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Creates a new training unit.
Parameters:
training_unit_in (TrainingUnitCreate): The training unit data.
db (Session): The database session.
Returns:
TrainingUnitInDB: The created training unit.
"""
training_unit = training_unit_crud.get_one(
db, TrainingUnit.name == training_unit_in.name, owner_id=user.id
)
if training_unit is not None:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail=f"Training unit with name {training_unit_in.name} already exists for user {user.id}",
)
training_unit = training_unit_crud.create_with_owner(
db, training_unit_in, owner_id=user.id
)
return training_unit
@router.put(
"/{training_unit_id}",
response_model=TrainingUnitInDB,
status_code=status.HTTP_200_OK,
)
def update_training_unit(
training_unit_id: int,
training_unit_update: TrainingUnitUpdate,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Updates a training unit.
Parameters:
training_unit_id (int): The ID of the training unit to update.
training_unit_update (TrainingUnitUpdate): The updated training unit data.
db (Session): The database session.
Returns:
TrainingUnitInDB: The updated training unit.
"""
training_unit: TrainingUnit = training_unit_crud.get_one(
db, TrainingUnit.id == training_unit_id
)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
if training_unit.owner_id != user.id and not user.is_superuser:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="You do not have permission to perform this action",
)
try:
training_unit = training_unit_crud.update(
db, training_unit, training_unit_update
)
except Exception as e: # pragma: no cover
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Could not update training unit. Error: " + str(e),
) from e
return training_unit
@router.delete("/{training_unit_id}", status_code=status.HTTP_200_OK)
def delete_training_unit(
training_unit_id: int,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Deletes a training unit.
Parameters:
training_unit_id (int): The ID of the training unit to delete.
db (Session): The database session.
Returns:
Dict[str, str]: A message indicating that the training unit has been deleted.
"""
training_unit: TrainingUnit = training_unit_crud.get_one(
db, TrainingUnit.id == training_unit_id
)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
if training_unit.owner_id != user.id and not user.is_superuser:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="You do not have permission to perform this action",
)
try:
training_unit_crud.delete(db, training_unit)
except Exception as e: # pragma: no cover
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Could not delete training unit. Error: " + str(e),
) from e # pragma: no cover
return {"detail": f"Training unit type with id {training_unit_id} deleted."}
@router.put(
"/{training_unit_id}/exercises/{exercise_id}/add",
response_model=Optional[TrainingUnitInDB],
status_code=status.HTTP_200_OK,
)
def add_exercise_to_training_unit(
training_unit_id: int,
exercise_id: int,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Adds an exercise to a training unit.
Parameters:
training_unit_id (int): The ID of the training unit.
exercise_id (int): The ID of the exercise.
db (Session, optional): The database session. Defaults to Depends(get_db).
user (User, optional): The current authenticated user.
Defaults to Depends(get_current_active_user).
Returns:
The updated training unit with the added exercise.
"""
training_unit = training_unit_crud.get_one(db, TrainingUnit.id == training_unit_id)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
| exercise = exercise_crud.get_one(db, Exercise.id == exercise_id) | 3 | 2023-11-05 14:37:46+00:00 | 8k |
choderalab/chiron | chiron/integrators.py | [
{
"identifier": "SamplerState",
"path": "chiron/states.py",
"snippet": "class SamplerState:\n \"\"\"\n Represents the state of the system that is updated during integration.\n\n Parameters\n ----------\n x0 : unit.Quantity\n The current positions of the particles in the simulation.\n velocities : unit.Quantity, optional\n The velocities of the particles in the simulation.\n box_vectors : unit.Quantity, optional\n The box vectors defining the simulation's periodic boundary conditions.\n\n \"\"\"\n\n def __init__(\n self,\n x0: unit.Quantity,\n velocities: Optional[unit.Quantity] = None,\n box_vectors: Optional[unit.Quantity] = None,\n ) -> None:\n # NOTE: all units are internally in the openMM units system as documented here:\n # http://docs.openmm.org/latest/userguide/theory/01_introduction.html#units\n if not isinstance(x0, unit.Quantity):\n raise TypeError(f\"x0 must be a unit.Quantity, got {type(x0)} instead.\")\n if velocities is not None and not isinstance(velocities, unit.Quantity):\n raise TypeError(\n f\"velocities must be a unit.Quantity, got {type(velocities)} instead.\"\n )\n if box_vectors is not None and not isinstance(box_vectors, unit.Quantity):\n if isinstance(box_vectors, List):\n try:\n box_vectors = self._convert_from_openmm_box(box_vectors)\n except:\n raise TypeError(f\"Unable to parse box_vectors {box_vectors}.\")\n else:\n raise TypeError(\n f\"box_vectors must be a unit.Quantity or openMM box, got {type(box_vectors)} instead.\"\n )\n if not x0.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"x0 must have units of distance, got {x0.unit} instead.\")\n if velocities is not None and not velocities.unit.is_compatible(\n unit.nanometer / unit.picosecond\n ):\n raise ValueError(\n f\"velocities must have units of distance/time, got {velocities.unit} instead.\"\n )\n if box_vectors is not None and not box_vectors.unit.is_compatible(\n unit.nanometer\n ):\n raise ValueError(\n f\"box_vectors must have units of distance, got {box_vectors.unit} instead.\"\n )\n if box_vectors is not None and box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors must be a 3x3 array, got {box_vectors.shape} instead.\"\n )\n\n self._x0 = x0\n self._velocities = velocities\n self._box_vectors = box_vectors\n self._distance_unit = unit.nanometer\n\n @property\n def x0(self) -> jnp.array:\n return self._convert_to_jnp(self._x0)\n\n @property\n def velocities(self) -> jnp.array:\n if self._velocities is None:\n return None\n return self._convert_to_jnp(self._velocities)\n\n @property\n def box_vectors(self) -> jnp.array:\n if self._box_vectors is None:\n return None\n return self._convert_to_jnp(self._box_vectors)\n\n @x0.setter\n def x0(self, x0: Union[jnp.array, unit.Quantity]) -> None:\n if isinstance(x0, unit.Quantity):\n self._x0 = x0\n else:\n self._x0 = unit.Quantity(x0, self._distance_unit)\n\n @property\n def distance_unit(self) -> unit.Unit:\n return self._distance_unit\n\n def _convert_to_jnp(self, array: unit.Quantity) -> jnp.array:\n \"\"\"\n Convert the sampler state to jnp arrays.\n \"\"\"\n import jax.numpy as jnp\n\n array_ = array.value_in_unit_system(unit.md_unit_system)\n return jnp.array(array_)\n\n def _convert_from_openmm_box(self, openmm_box_vectors: List) -> unit.Quantity:\n box_vec = []\n for i in range(0, 3):\n layer = []\n for j in range(0, 3):\n layer.append(\n openmm_box_vectors[i][j].value_in_unit(openmm_box_vectors[0].unit)\n )\n box_vec.append(layer)\n return unit.Quantity(jnp.array(box_vec), openmm_box_vectors[0].unit)"
},
{
"identifier": "ThermodynamicState",
"path": "chiron/states.py",
"snippet": "class ThermodynamicState:\n \"\"\"\n Represents the thermodynamic state of the system.\n\n Parameters\n ----------\n potential : NeuralNetworkPotential\n The potential energy function of the system.\n temperature : unit.Quantity, optional\n The temperature of the simulation.\n volume : unit.Quantity, optional\n The volume of the simulation.\n pressure : unit.Quantity, optional\n The pressure of the simulation.\n\n \"\"\"\n\n def __init__(\n self,\n potential: Optional[NeuralNetworkPotential],\n temperature: Optional[unit.Quantity] = None,\n volume: Optional[unit.Quantity] = None,\n pressure: Optional[unit.Quantity] = None,\n ):\n self.potential = potential\n\n if temperature is not None and not isinstance(temperature, unit.Quantity):\n raise TypeError(\n f\"temperature must be a unit.Quantity, got {type(temperature)} instead.\"\n )\n elif temperature is not None:\n if not temperature.unit.is_compatible(unit.kelvin):\n raise ValueError(\n f\"temperature must have units of temperature, got {temperature.unit} instead.\"\n )\n\n if volume is not None and not isinstance(volume, unit.Quantity):\n raise TypeError(\n f\"volume must be a unit.Quantity, got {type(volume)} instead.\"\n )\n elif volume is not None:\n if not volume.unit.is_compatible(unit.nanometer**3):\n raise ValueError(\n f\"volume must have units of distance**3, got {volume.unit} instead.\"\n )\n if pressure is not None and not isinstance(pressure, unit.Quantity):\n raise TypeError(\n f\"pressure must be a unit.Quantity, got {type(pressure)} instead.\"\n )\n elif pressure is not None:\n if not pressure.unit.is_compatible(unit.atmosphere):\n raise ValueError(\n f\"pressure must have units of pressure, got {pressure.unit} instead.\"\n )\n\n self.temperature = temperature\n if temperature is not None:\n self.beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * (self.temperature))\n else:\n self.beta = None\n\n self.volume = volume\n self.pressure = pressure\n\n from .utils import get_nr_of_particles\n\n self.nr_of_particles = get_nr_of_particles(self.potential.topology)\n self._check_completness()\n\n def check_variables(self) -> None:\n \"\"\"\n Check if all necessary variables are set and log the simulation ensemble.\n \"\"\"\n variables = [\n \"temperature\",\n \"volume\",\n \"pressure\",\n ]\n set_variables = [var for var in variables if getattr(self, var) is not None]\n return set_variables\n\n def _check_completness(self):\n # check which variables are set\n set_variables = self.check_variables()\n\n if len(set_variables) == 0:\n log.info(\"No variables are set.\")\n\n # print all set variables\n for var in set_variables:\n log.info(f\"{var} is set.\")\n\n if self.temperature and self.volume and self.nr_of_particles:\n log.info(\"NVT ensemble simulated.\")\n if self.temperature and self.pressure and self.nr_of_particles:\n log.info(\"NpT ensemble is simulated.\")\n\n @classmethod\n def are_states_compatible(cls, state1, state2):\n \"\"\"\n Check if two simulation states are compatible.\n\n This method should define the criteria for compatibility,\n such as matching number of particles, etc.\n\n Parameters\n ----------\n state1 : SimulationState\n The first simulation state to compare.\n state2 : SimulationState\n The second simulation state to compare.\n\n Returns\n -------\n bool\n True if states are compatible, False otherwise.\n \"\"\"\n pass\n\n def get_reduced_potential(\n self, sampler_state: SamplerState, nbr_list=None\n ) -> float:\n \"\"\"\n Compute the reduced potential for the given sampler state.\n\n Parameters\n 
----------\n sampler_state : SamplerState\n The sampler state for which to compute the reduced potential.\n nbr_list : NeighborList or PairList, optional\n The neighbor list or pair list routine to use for calculating the reduced potential.\n\n Returns\n -------\n float\n The reduced potential of the system.\n\n Notes\n -----\n The reduced potential is computed as:\n u = \\beta [U(x) + p V(x) + \\mu N(x)],\n where \\beta is the inverse temperature, p is the pressure,\n \\mu is the chemical potential, x are the atomic positions,\n U(x) is the potential energy, V(x) is the box volume,\n and N(x) is the number of particles.\n \"\"\"\n if self.beta is None:\n self.beta = 1.0 / (\n unit.BOLTZMANN_CONSTANT_kB * (self.temperature * unit.kelvin)\n )\n log.debug(f\"sample state: {sampler_state.x0}\")\n reduced_potential = (\n unit.Quantity(\n self.potential.compute_energy(sampler_state.x0, nbr_list),\n unit.kilojoule_per_mole,\n )\n ) / unit.AVOGADRO_CONSTANT_NA\n log.debug(f\"reduced potential: {reduced_potential}\")\n if self.pressure is not None:\n reduced_potential += self.pressure * self.volume\n\n return self.beta * reduced_potential\n\n def kT_to_kJ_per_mol(self, energy):\n energy = energy * unit.AVOGADRO_CONSTANT_NA\n return energy / self.beta"
},
{
"identifier": "SimulationReporter",
"path": "chiron/reporters.py",
"snippet": "class SimulationReporter:\n def __init__(self, filename: str, topology: Topology, buffer_size: int = 1):\n \"\"\"\n Initialize the SimulationReporter.\n\n Parameters\n ----------\n filename : str\n Name of the HDF5 file to write the simulation data.\n topology: openmm.Topology\n buffer_size : int, optional\n Number of data points to buffer before writing to disk (default is 1).\n\n \"\"\"\n import mdtraj as md\n\n self.filename = filename\n self.buffer_size = buffer_size\n self.topology = topology\n self.buffer = {}\n self.h5file = h5py.File(filename, \"a\")\n log.info(f\"Writing simulation data to {filename}\")\n\n def get_available_keys(self):\n return self.h5file.keys()\n\n def report(self, data_dict):\n \"\"\"\n Add new data to the buffer and write the buffer to disk if it's full.\n\n Parameters\n ----------\n data_dict : dict\n Dictionary containing data to report. Keys are data labels (e.g., 'energy'),\n and values are the data points (usually numpy arrays).\n\n \"\"\"\n for key, value in data_dict.items():\n if key not in self.buffer:\n self.buffer[key] = []\n self.buffer[key].append(value)\n\n if len(self.buffer[key]) >= self.buffer_size:\n self._write_to_disk(key)\n\n def _write_to_disk(self, key):\n \"\"\"\n Write buffered data of a given key to the HDF5 file.\n\n Parameters\n ----------\n key : str\n The key of the data to write to disk.\n\n \"\"\"\n data = np.array(self.buffer[key])\n if key in self.h5file:\n dset = self.h5file[key]\n dset.resize((dset.shape[0] + data.shape[0],) + data.shape[1:])\n dset[-data.shape[0] :] = data\n else:\n log.debug(f\"Creating {key} in {self.filename}\")\n self.h5file.create_dataset(\n key, data=data, maxshape=(None,) + data.shape[1:], chunks=True\n )\n\n self.buffer[key] = []\n\n def close(self):\n \"\"\"\n Write any remaining data in the buffer to disk and close the HDF5 file.\n\n \"\"\"\n for key in self.buffer:\n if self.buffer[key]:\n self._write_to_disk(key)\n self.h5file.close()\n\n def get_property(self, name: str):\n \"\"\"\n Get the property from the HDF5 file.\n\n Parameters\n ----------\n name : str\n Name of the property to get.\n\n Returns\n -------\n np.ndarray\n The property.\n\n \"\"\"\n if name not in self.h5file:\n log.debug(f\"{name} not in HDF5 file\")\n return None\n else:\n return np.array(self.h5file[name])\n\n def get_mdtraj_trajectory(self):\n import mdtraj as md\n\n return md.Trajectory(\n xyz=self.get_property(\"traj\"),\n topology=md.Topology.from_openmm(self.topology),\n unitcell_lengths=self.get_property(\"box_vectors\"),\n unitcell_angles=self.get_property(\"box_angles\"),\n )"
}
] | import jax.numpy as jnp
from jax import random
from tqdm import tqdm
from openmm import unit
from .states import SamplerState, ThermodynamicState
from typing import Dict
from loguru import logger as log
from .reporters import SimulationReporter
from typing import Optional
from .utils import get_list_of_mass | 3,795 | # This file contains the integrator class for the Langevin dynamics simulation
class LangevinIntegrator:
"""
Langevin dynamics integrator for molecular dynamics simulation using the BAOAB splitting scheme [1].
References:
[1] Benedict Leimkuhler, Charles Matthews;
Robust and efficient configurational molecular sampling via Langevin dynamics.
J. Chem. Phys. 7 May 2013; 138 (17): 174102. https://doi.org/10.1063/1.4802990
"""
def __init__(
self,
stepsize=1.0 * unit.femtoseconds,
collision_rate=1.0 / unit.picoseconds,
save_frequency: int = 100,
reporter: Optional[SimulationReporter] = None,
) -> None:
"""
Initialize the LangevinIntegrator object.
Parameters
----------
stepsize : unit.Quantity, optional
Time step of integration with units of time. Default is 1.0 * unit.femtoseconds.
collision_rate : unit.Quantity, optional
Collision rate for the Langevin dynamics, with units 1/time. Default is 1.0 / unit.picoseconds.
save_frequency : int, optional
Frequency of saving the simulation data. Default is 100.
reporter : SimulationReporter, optional
Reporter object for saving the simulation data. Default is None.
"""
self.kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
log.info(f"stepsize = {stepsize}")
log.info(f"collision_rate = {collision_rate}")
log.info(f"save_frequency = {save_frequency}")
self.stepsize = stepsize
self.collision_rate = collision_rate
if reporter is not None:
log.info(f"Using reporter {reporter} saving to {reporter.filename}")
self.reporter = reporter
self.save_frequency = save_frequency
self.velocities = None
def set_velocities(self, vel: unit.Quantity) -> None:
"""
Set the initial velocities for the Langevin Integrator.
Parameters
----------
vel : unit.Quantity
Velocities to be set for the integrator.
"""
self.velocities = vel
def run(
self,
sampler_state: SamplerState,
| # This file contains the integrator class for the Langevin dynamics simulation
class LangevinIntegrator:
"""
Langevin dynamics integrator for molecular dynamics simulation using the BAOAB splitting scheme [1].
References:
[1] Benedict Leimkuhler, Charles Matthews;
Robust and efficient configurational molecular sampling via Langevin dynamics.
J. Chem. Phys. 7 May 2013; 138 (17): 174102. https://doi.org/10.1063/1.4802990
"""
def __init__(
self,
stepsize=1.0 * unit.femtoseconds,
collision_rate=1.0 / unit.picoseconds,
save_frequency: int = 100,
reporter: Optional[SimulationReporter] = None,
) -> None:
"""
Initialize the LangevinIntegrator object.
Parameters
----------
stepsize : unit.Quantity, optional
Time step of integration with units of time. Default is 1.0 * unit.femtoseconds.
collision_rate : unit.Quantity, optional
Collision rate for the Langevin dynamics, with units 1/time. Default is 1.0 / unit.picoseconds.
save_frequency : int, optional
Frequency of saving the simulation data. Default is 100.
reporter : SimulationReporter, optional
Reporter object for saving the simulation data. Default is None.
"""
self.kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
log.info(f"stepsize = {stepsize}")
log.info(f"collision_rate = {collision_rate}")
log.info(f"save_frequency = {save_frequency}")
self.stepsize = stepsize
self.collision_rate = collision_rate
if reporter is not None:
log.info(f"Using reporter {reporter} saving to {reporter.filename}")
self.reporter = reporter
self.save_frequency = save_frequency
self.velocities = None
def set_velocities(self, vel: unit.Quantity) -> None:
"""
Set the initial velocities for the Langevin Integrator.
Parameters
----------
vel : unit.Quantity
Velocities to be set for the integrator.
"""
self.velocities = vel
def run(
self,
sampler_state: SamplerState, | thermodynamic_state: ThermodynamicState, | 1 | 2023-11-07 18:17:43+00:00 | 8k |
HealthSciTech/E2E-PPG | ppg_peak_detection.py | [
{
"identifier": "ppg_peaks",
"path": "kazemi_peak_detection.py",
"snippet": "def ppg_peaks(signal, sampling_freq, seconds, overlap, minlen):\n \"\"\"\n Main function to detect peaks in PPG signals using the trained model.\n \n Args:\n signal (numpy.ndarray): PPG signal\n sampling_freq (int): Sampling frequency of the signal\n seconds (int): Signal length in seconds\n overlap (int): Overlap in seconds\n minlen (int): Minimum length of the signal in seconds\n\n Return:\n peak_indexes (list): A list containing peak indexes \n \n Reference:\n Kazemi, K., Laitala, J., Azimi, I., Liljeberg, P., & Rahmani, A. M. (2022). \n Robust ppg peak detection using dilated convolutional neural networks. Sensors, 22(16), 6054.\n \"\"\"\n # Upsample the signal if the sampling frequency is not 100 Hz\n \n resampling_flag = False\n # Check if resampling is needed and perform resampling if necessary\n if sampling_freq != KAZEMI_MODEL_SAMPLING_FREQUENCYPLING_FREQUENCY:\n signal = resample_signal(\n sig=signal, fs_origin=sampling_freq, fs_target=KAZEMI_MODEL_SAMPLING_FREQUENCYPLING_FREQUENCY)\n resampling_flag = True\n resampling_rate = sampling_freq/KAZEMI_MODEL_SAMPLING_FREQUENCYPLING_FREQUENCY\n sampling_freq = KAZEMI_MODEL_SAMPLING_FREQUENCYPLING_FREQUENCY\n \n # Split the signal into segments\n segmentized_signal = split_signal(signal, sampling_freq, seconds, overlap, minlen)\n\n # Make predictions using the pre-trained model and identify peaks\n prediction = model_prediction(segmentized_signal)\n indices = []\n\n # Process each 15-seconds segmentized signal\n for i in range(len(segmentized_signal)):\n # Call the wrapper function\n peak_index = Wrapper_function(prediction[i], segmentized_signal[i])\n peak_index = [item + sampling_freq * i * seconds for item in peak_index]\n indices.append(peak_index)\n\n peak_indexes = [item for sublist in indices for item in sublist]\n \n # If resampling performed, update indices according to the original sampling rate\n if resampling_flag:\n peak_indexes = [int(peak * resampling_rate) for peak in peak_indexes]\n \n return peak_indexes"
},
{
"identifier": "sqa",
"path": "ppg_sqa.py",
"snippet": "def sqa(\n sig: np.ndarray,\n sampling_rate: int,\n filter_signal: bool = True,\n) -> Tuple[list, list]:\n \"\"\"\n Perform PPG Signal Quality Assessment (SQA).\n \n This function assesses the quality of a PPG signal by classifying its segments\n as reliable (clean) or unrelaible (noisy) using a pre-trained model.\n\n The clean indices represent parts of the PPG signal that are deemed reliable,\n while the noisy indices indicate parts that may be affected by noise or artifacts.\n \n Args:\n sig (np.ndarray): PPG signal.\n sampling_rate (int): Sampling rate of the PPG signal.\n filter_signal (bool): True if the signal has not filtered using\n a bandpass filter.\n \n Return:\n clean_indices: A list of clean indices.\n noisy_indices: A list of noisy indices.\n \n \n Reference:\n Feli, M., Azimi, I., Anzanpour, A., Rahmani, A. M., & Liljeberg, P. (2023).\n An energy-efficient semi-supervised approach for on-device photoplethysmogram signal quality assessment. \n Smart Health, 28, 100390.\n\n\n \"\"\"\n # Load pre-trained model and normalization scaler\n scaler = joblib.load(os.path.join(MODEL_PATH, SCALER_FILE_NAME))\n model = pickle.load(\n open(os.path.join(MODEL_PATH, SQA_MODEL_FILE_NAME), 'rb'))\n \n resampling_flag = False\n # Check if resampling is needed and perform resampling if necessary\n if sampling_rate != SQA_MODEL_SAMPLING_FREQUENCY:\n sig = resample_signal(\n sig=sig, fs_origin=sampling_rate, fs_target=SQA_MODEL_SAMPLING_FREQUENCY)\n resampling_flag = True\n resampling_rate = sampling_rate/SQA_MODEL_SAMPLING_FREQUENCY\n sampling_rate = SQA_MODEL_SAMPLING_FREQUENCY\n\n # Apply bandpass filter if needed\n if filter_signal:\n sig = bandpass_filter(\n sig=sig, fs=sampling_rate, lowcut=0.5, highcut=3)\n\n # Generate indices for the PPG signal\n sig_indices = np.arange(len(sig))\n\n # Segment the PPG signal into\n segments, segments_indices = segmentation(\n sig=sig,\n sig_indices=sig_indices,\n sampling_rate=sampling_rate,\n method='shifting',\n segment_size=SEGMENT_SIZE,\n shift_size=SHIFTING_SIZE,\n )\n\n # Initialize lists to store all reliable and unreliable segments\n reliable_segments_all = []\n unreliable_segments_all = []\n reliable_indices_all = []\n unreliable_indices_all = []\n\n # Loop through the segments for feature extraction and classification\n for idx, segment in enumerate(segments):\n\n # Feature extraction\n features = feature_extraction(segment, sampling_rate)\n\n # Classification\n if np.isnan(np.array(features)).any():\n pred = 1\n else:\n features_norm = scaler.transform([features])\n pred = model.predict(features_norm)\n\n # Categorize segments based on classification result\n if pred == 0:\n reliable_segments_all.append(segment)\n reliable_indices_all.append(segments_indices[idx])\n else:\n unreliable_segments_all.append(segment)\n unreliable_indices_all.append(segments_indices[idx])\n\n # Generate flatten lists of reliable indices as clean indices\n clean_indices = list(set([item for segment in reliable_indices_all for item in segment]))\n\n # The indices that dont exist in the flat list of clean indices indicate unreliable indices\n unreliable_indices = [item for item in sig_indices if item not in clean_indices]\n\n # Unflat the unreliable_indices list to separte noisy parts\n noisy_indices = []\n for group in mit.consecutive_groups(unreliable_indices):\n noisy_indices.append(list(group))\n noisy_indices = [noisy_indices[i] for i in range(\n len(noisy_indices)) if len(noisy_indices[i]) > SHIFTING_SIZE]\n \n # If resampling performed, update 
indices according to the original sampling rate\n if resampling_flag:\n clean_indices = [int(index * resampling_rate) for index in clean_indices]\n noisy_indices = [[int(index * resampling_rate) for index in noise] for noise in noisy_indices]\n\n\n return clean_indices, noisy_indices"
},
{
"identifier": "reconstruction",
"path": "ppg_reconstruction.py",
"snippet": "def reconstruction(\n sig: np.ndarray,\n clean_indices: list,\n noisy_indices:list,\n sampling_rate: int,\n filter_signal: bool = True,\n) -> Tuple[np.ndarray, list, list]:\n '''\n Reconstruct noisy PPG signals using GAN.\n\n Args:\n sig (np.ndarray): Original PPG signal.\n clean_indices (list): List of indices representing clean parts.\n noisy_indices (list): List of indices representing noisy parts.\n sampling_rate (int): Sampling rate of the signal.\n filter_signal (bool): True if the signal has not filtered using\n a bandpass filter.\n\n Return:\n ppg_signal (np.ndarray): Reconstructed PPG signal (if reconstruction is\n applied; otherwise, returns the original signal).\n clean_indices (list): Updated indices of clean parts (if reconstruction is\n applied; otherwise, returns the original indices of clean parts).\n noisy_indices (list): Updated indices of noisy parts (if reconstruction is\n applied; otherwise, returns the original indices of noisy parts).\n \n Reference:\n Wang, Y., Azimi, I., Kazemi, K., Rahmani, A. M., & Liljeberg, P. (2022, July). \n Ppg signal reconstruction using deep convolutional generative adversarial network. \n In 2022 44th Annual International Conference of the IEEE Engineering in Medicine & Biology Society (EMBC) (pp. 3387-3391). IEEE.\n\n '''\n\n # Set the Generator class in the main module for compatibility with the saved GAN model\n setattr(__main__, \"Generator\", Generator)\n\n # Load GAN model parameters\n generator = torch.load(os.path.join(\n MODEL_PATH, GAN_MODEL_FILE_NAME), map_location=torch.device('cpu'))\n device = torch.device(\n \"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n resampling_flag = False\n # Check if resampling is needed and perform resampling if necessary\n if sampling_rate != RECONSTRUCTION_MODEL_SAMPLING_FREQUENCY:\n sig = resample_signal(\n sig=sig, fs_origin=sampling_rate, fs_target=RECONSTRUCTION_MODEL_SAMPLING_FREQUENCY)\n resampling_flag = True\n resampling_rate = sampling_rate/RECONSTRUCTION_MODEL_SAMPLING_FREQUENCY\n sampling_rate_original = sampling_rate\n sampling_rate = RECONSTRUCTION_MODEL_SAMPLING_FREQUENCY\n\n # Apply bandpass filter if needed\n if filter_signal:\n sig = bandpass_filter(\n sig=sig, fs=sampling_rate, lowcut=0.5, highcut=3)\n\n # Scale the original PPG signal for further processing\n sig_scaled= preprocessing.scale(sig)\n\n # Maximum length for reconstruction\n max_rec_length = int(MAX_RECONSTRUCTION_LENGTH_SEC*sampling_rate)\n\n # Flag to indicate if reconstruction has occurred\n reconstruction_flag = False\n\n # Iterate over noisy parts for reconstruction\n for noise in noisy_indices:\n if len(noise) <= max_rec_length:\n noise_start_idx = noise[0]\n # Check if there is sufficient preceding clean signal for reconstruction\n if noise_start_idx >= max_rec_length:\n # Check if the preceding signal is clean\n if set(range(\n noise_start_idx - max_rec_length,\n noise_start_idx)).issubset(clean_indices):\n # Perform noise reconstruction for the current noise\n reconstructed_noise = gan_rec(\n sig[noise_start_idx-max_rec_length:noise_start_idx],\n noise, sampling_rate, generator, device)\n\n # Upsample the reconstructed noise\n reconstructed_noise_res = resample(\n reconstructed_noise,\n int(len(reconstructed_noise)*UPSAMPLING_RATE))\n\n # Upsample the clean signal before the noise\n sig_before_noise_res = resample(\n sig_scaled[:noise_start_idx],\n int(len(sig_scaled[:noise_start_idx])*UPSAMPLING_RATE))\n\n # Upsample the clean signal after the noise\n sig_after_noise_res = resample(\n 
sig_scaled[noise[-1]:],\n int(len(sig_scaled[noise[-1]:])*UPSAMPLING_RATE))\n\n # Find peaks in the clean signal before the noise\n peaks_sig_before_noise, _ = find_peaks(\n sig_before_noise_res,\n int(sampling_rate*UPSAMPLING_RATE))\n\n # Check if the reconstructed noise is long enough\n # (considering a threshold of 2 seconds)\n if len(reconstructed_noise_res) >= 2*sampling_rate*UPSAMPLING_RATE:\n try:\n # Find peaks in the reconstructed noise\n peaks_noise_rec, _ = find_peaks(\n reconstructed_noise_res,\n int(sampling_rate*UPSAMPLING_RATE))\n\n # Check if the clean signal after the noise is long enough\n # (considering a threshold of 2 seconds)\n if len(sig_after_noise_res) >= 2*sampling_rate*UPSAMPLING_RATE:\n # Find peaks in the clean signal after the noise\n peaks_sig_after_noise, _ = find_peaks(\n sig_after_noise_res,\n int(sampling_rate*UPSAMPLING_RATE))\n\n # Merge the reconstructed noise with the clean signal\n sig_res = list(sig_before_noise_res[:peaks_sig_before_noise[-1]]) + \\\n list(reconstructed_noise_res[peaks_noise_rec[0]:peaks_noise_rec[-1]]) + \\\n list(sig_after_noise_res[peaks_sig_after_noise[0]:])\n\n # If the clean signal after the noise is too short, there is no need\n # for peak detection\n else:\n # Merge the reconstructed noise with the clean signal\n sig_res = list(sig_before_noise_res[:peaks_sig_before_noise[-1]]) + \\\n list(reconstructed_noise_res[peaks_noise_rec[0]:peaks_noise_rec[-1]]) + \\\n list(sig_after_noise_res)\n except:\n continue\n\n else:\n try:\n # Check if the clean signal after the noise is long enough\n # (considering a threshold of 2 seconds)\n if len(sig_after_noise_res) >= 2*sampling_rate*UPSAMPLING_RATE:\n # Find peaks in the clean signal after the noise\n peaks_sig_after_noise, _ = find_peaks(\n sig_after_noise_res,\n int(sampling_rate*UPSAMPLING_RATE))\n\n # Merge the reconstructed noise with the clean signal\n sig_res = list(sig_before_noise_res[:peaks_sig_before_noise[-1]]) + \\\n list(reconstructed_noise_res) + \\\n list(sig_after_noise_res[peaks_sig_after_noise[0]:])\n\n # If the clean signal after the noise is too short, there is no need\n # for peak detection\n else:\n # Merge the reconstructed noise with the clean signal\n sig_res = list(sig_before_noise_res[:peaks_sig_before_noise[-1]]) + \\\n list(reconstructed_noise_res) + \\\n list(sig_after_noise_res)\n except:\n continue\n\n # Resample the reconstructed signal to the original length of the signal\n sig_scaled= resample(sig_res, len(sig_scaled))\n\n # Descale the reconstructed signal\n ppg_descaled = (sig_scaled*np.std(sig)) + np.mean(sig)\n\n # Set the reconstruction flag to True\n reconstruction_flag = True\n\n # Perform the signal quality assessment to ensure that the reconstructed\n # signal is not distorted\n clean_indices, noisy_indices = sqa(\n sig=ppg_descaled, sampling_rate=sampling_rate, filter_signal=False)\n\n # Check if there was a reconstruction\n if reconstruction_flag:\n ppg_signal = ppg_descaled\n else:\n ppg_signal = sig\n \n # If resampling performed, update the reconstructed signal and indices according to the original sampling rate\n if resampling_flag:\n clean_indices = [int(index * resampling_rate) for index in clean_indices]\n noisy_indices = [[int(index * resampling_rate) for index in noise] for noise in noisy_indices]\n ppg_signal = resample_signal(\n sig=ppg_signal, fs_origin=sampling_rate, fs_target=sampling_rate_original)\n \n # Return the reconstructed or original PPG signal, along with updated indices\n return ppg_signal, clean_indices, 
noisy_indices"
},
{
"identifier": "clean_seg_extraction",
"path": "ppg_clean_extraction.py",
"snippet": "def clean_seg_extraction(\n sig: np.ndarray,\n noisy_indices: list,\n window_length: int\n) -> list:\n \n \"\"\"\n Scan the clean parts of the signal and extract clean segments based on the input window length.\n \n Args:\n sig (numpy.ndarray): Input PPG signal.\n noisy_indices (list): List of noisy segment indices.\n window_length (int): Desired window length for clean segment extraction in terms of samples.\n \n Return:\n clean_segments (list): List of clean PPG segments with the specified window length and their starting index.\n \"\"\"\n \n def find_clean_parts(quality_lst:list) -> list:\n '''\n Scan the quality vector and find the start and end indices of clean parts.\n\n Args:\n quality_lst (list): Quality vector of the signal (0 indictes clean and 1 indicates noisy)\n \n Return:\n start_end_clean (list): Start and end indices of the clean parts in a list of tuples\n '''\n \n start_end_clean = []\n start = 0\n for i in range(len(quality_lst)-1):\n if quality_lst[start] == quality_lst[i+1]:\n if i+1 == len(quality_lst)-1:\n end = i+1\n if quality_lst[start] == 0:\n start_end_clean.append((start,end))\n else:\n continue\n \n else:\n end = i\n if quality_lst[start] == 0:\n start_end_clean.append((start,end))\n \n start = i+1\n \n return start_end_clean\n \n \n # Create a new DataFrame to store PPG, and quality information\n quality_df = pd.DataFrame(columns=['ppg','quality'])\n \n # Flatten the noise indices list\n flat_list_noise = [item for noise in noisy_indices for item in noise]\n \n # Define a quality vector (0 indictes clean and 1 indicates noisy)\n quality = [1 if i in flat_list_noise else 0 for i in range(len(sig))]\n \n # Store ppg signal with quality vector in dataframe\n quality_df['quality'] = quality\n quality_df['ppg'] = sig\n \n # Find start and end indices of clean parts in the quality list\n start_end_clean_idx = find_clean_parts(quality_df['quality'].tolist())\n \n # Initialize a list to store total clean segments with the specified window length\n clean_segments = []\n\n # Extract clean segments based on window length\n for indices in start_end_clean_idx:\n # Check if the current clean part has the required window length\n if (indices[1] - indices[0]) >= window_length:\n # Select the current clean part\n clean_part = quality_df['ppg'][indices[0] : indices[1]].tolist()\n \n # Calculate the number of segments with the specified window length that can be extarcted from the current clean part\n num_segments = len(clean_part) // window_length\n \n # Extract clean segment with the specified window length from current clean part and their starting indices\n segments = [((indices[0] + i * window_length), clean_part[i * window_length: (i + 1) * window_length]) for i in range(num_segments)]\n \n # Add extracted segments to total clean segments\n clean_segments.extend(segments)\n\n \n return clean_segments"
},
{
"identifier": "normalize_data",
"path": "utils.py",
"snippet": "def normalize_data(sig: np.ndarray) -> np.ndarray:\n \"\"\"\n Normalize the input signal between zero and one\n \n Args:\n sig (np.ndarray): PPG signal.\n \n Return:\n np.ndarray: Normalized signal\n \"\"\"\n return (sig - np.min(sig)) / (np.max(sig) - np.min(sig))"
},
{
"identifier": "get_data",
"path": "utils.py",
"snippet": "def get_data(\n file_name: str,\n local_directory: str = \"data\",\n usecols: List[str] = ['ppg'],\n) -> np.ndarray:\n \"\"\"\n Import data (e.g., PPG signals)\n \n Args:\n file_name (str): Name of the input file\n local_directory (str): Data directory\n usecols (List[str]): The columns to read from the input file\n \n Return:\n sig (np.ndarray): the input signal (e.g., PPG)\n \"\"\"\n try:\n # Construct the file path\n file_path = os.path.join(local_directory, file_name)\n # Load data from the specified CSV file\n input_data = pd.read_csv(\n file_path,\n delim_whitespace=True,\n usecols=usecols)\n # Extract signal\n sig = input_data[usecols[0]].values\n return sig\n except FileNotFoundError:\n print(f\"File not found: {file_name}\")\n except pd.errors.EmptyDataError:\n print(f\"Empty data in file: {file_name}\")\n except Exception as e:\n print(f\"An unexpected error occurred: {e}\")\n # Return None in case of an error\n return None"
}
] | import neurokit2 as nk
import heartpy as hp
import numpy as np
import warnings
from heartpy.datautils import rolling_mean
from scipy import signal
from kazemi_peak_detection import ppg_peaks
from ppg_sqa import sqa
from ppg_reconstruction import reconstruction
from ppg_clean_extraction import clean_seg_extraction
from utils import normalize_data, get_data | 5,336 | # -*- coding: utf-8 -*-
warnings.filterwarnings("ignore")
def peak_detection(
clean_segments: list,
sampling_rate: int,
method: str ='kazemi') -> list:
'''
Detect peaks in clean PPG segments using specified peak detection method.
Args:
clean_segments (list): List of clean PPG segments with the specified window length and their starting index.
sampling_rate: Sampling rate of the PPG signal.
method (str): Peak detection method. Valid inputs: 'nk', 'kazemi', and 'heartpy'. The default is 'kazemi'. (optional)
Return:
total_peaks (list): List of lists, each containing the detected peaks for a corresponding clean segment.
References:
Kazemi method: Kazemi, K., Laitala, J., Azimi, I., Liljeberg, P., & Rahmani, A. M. (2022).
Robust ppg peak detection using dilated convolutional neural networks. Sensors, 22(16), 6054.
Neurokit method: Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H., ... & Chen, S. A. (2021).
NeuroKit2: A Python toolbox for neurophysiological signal processing. Behavior research methods, 1-8.
HeartPY method: Van Gent, P., Farah, H., Nes, N., & van Arem, B. (2018, June).
Heart rate analysis for human factors: Development and validation of an open source toolkit for noisy naturalistic heart rate data.
In Proceedings of the 6th HUMANIST Conference (pp. 173-178).
'''
# Initialize a list to store total peaks
total_peaks = []
# Check the desired peak detection method
if method == 'nk':
# Neurokit method
upsampling_rate = 2
sampling_rate_new = sampling_rate * upsampling_rate
for i in range(len(clean_segments)):
# Normalize PPG signal
| # -*- coding: utf-8 -*-
warnings.filterwarnings("ignore")
def peak_detection(
clean_segments: list,
sampling_rate: int,
method: str ='kazemi') -> list:
'''
Detect peaks in clean PPG segments using specified peak detection method.
Args:
clean_segments (list): List of clean PPG segments with the specified window length and their starting index.
sampling_rate: Sampling rate of the PPG signal.
method (str): Peak detection method. Valid inputs: 'nk', 'kazemi', and 'heartpy'. The default is 'kazemi'. (optional)
Return:
total_peaks (list): List of lists, each containing the detected peaks for a corresponding clean segment.
References:
Kazemi method: Kazemi, K., Laitala, J., Azimi, I., Liljeberg, P., & Rahmani, A. M. (2022).
Robust ppg peak detection using dilated convolutional neural networks. Sensors, 22(16), 6054.
Neurokit method: Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H., ... & Chen, S. A. (2021).
NeuroKit2: A Python toolbox for neurophysiological signal processing. Behavior research methods, 1-8.
HeartPY method: Van Gent, P., Farah, H., Nes, N., & van Arem, B. (2018, June).
Heart rate analysis for human factors: Development and validation of an open source toolkit for noisy naturalistic heart rate data.
In Proceedings of the 6th HUMANIST Conference (pp. 173-178).
'''
# Initialize a list to store total peaks
total_peaks = []
# Check the desired peak detection method
if method == 'nk':
# Neurokit method
upsampling_rate = 2
sampling_rate_new = sampling_rate * upsampling_rate
for i in range(len(clean_segments)):
# Normalize PPG signal | ppg_normed = normalize_data(clean_segments[i][1]) | 4 | 2023-11-07 22:52:14+00:00 | 8k |
Antelcat/ida_copilot | ida_copilot.py | [
{
"identifier": "panel",
"path": "ida_copilot/panel.py",
"snippet": "class Singleton(type):\nclass CopilotPanel(idaapi.PluginForm, metaclass=Singleton):\nclass CopilotPanelCallbackManager(BaseCallbackHandler):\nclass ShowCopilotPanel(idaapi.action_handler_t):\n def __call__(cls, *args, **kwargs):\n def __init__(self):\n def OnCreate(self, form):\n def OnClose(self, form):\n def Show(self, **kwargs):\n def on_text(self, text: str, **kwargs):\n def __init__(self, panel):\n def activate(self, ctx):\n def update(self, ctx):"
},
{
"identifier": "Copilot",
"path": "ida_copilot/copilot.py",
"snippet": "class Copilot:\n def run(self, temperature=0.2, model='gpt-3.5-turbo-0613'):\n ea = idaapi.get_screen_ea()\n func_name = idaapi.get_func_name(ea)\n\n tools = [\n self.__GetAddressInfoTool(),\n self.__GetDefinitionTool(),\n self.__GetPseudocodeTool(),\n self.__SetFunctionCommentTool(),\n self.__SetFunctionDefinitionTool(),\n self.__SetFunctionNameTool(),\n self.__GetIsMyWorkDoneTool(ea)\n ]\n\n agent = initialize_agent(\n agent_type=AgentType.OPENAI_MULTI_FUNCTIONS,\n llm=ChatOpenAI(temperature=temperature, model=model),\n tools=tools,\n # callback_manager=BaseCallbackManager(handlers=[\n # CopilotPanelCallbackManager()]),\n verbose=True,\n )\n\n prompt = prompts.default_prompt_zh.format(\n binary_description=f'name: {func_name}, address 0x{ea:x}'\n # pseudocode=pseudocode\n )\n\n # 开启新线程运行agent\n t = concurrent.futures.ThreadPoolExecutor()\n loop = asyncio.get_event_loop()\n loop.run_in_executor(t, agent.run, prompt)\n\n class __GetAddressInfoTool(BaseTool):\n name = 'get_address_info'\n description = ('Given a hex address or function name, show its information. '\n '**Input Format**: `<hex_address_or_function_name>`. '\n '**Input Example1**: `sub_140007080`. '\n '**Input Example2**: `0x140007080`.')\n\n @staticmethod\n def __get_address_info(name_or_hex_address: str):\n try:\n if name_or_hex_address.lower().startswith('0x'):\n ea = int(name_or_hex_address, 16)\n else:\n ea = idaapi.get_name_ea(idaapi.BADADDR, name_or_hex_address)\n if ea == idaapi.BADADDR:\n raise Exception\n except Exception:\n return f'{name_or_hex_address} is not a valid address or name.'\n\n flags = idaapi.get_flags(ea)\n result = ''\n\n # 检查地址是否位于函数内部\n func = idaapi.get_func(ea)\n if func:\n result += \"Address 0x%X is inside a function.\\n\" % ea\n result += \"Function start: 0x%X\\n\" % func.start_ea\n result += \"Function end: 0x%X\\n\" % func.end_ea\n func_name = idaapi.get_func_name(func.start_ea)\n if func_name:\n result += \"Function name: %s\\n\" % func_name\n elif idaapi.is_code(flags):\n result += \"Address 0x%X is code.\\n\" % ea\n elif idaapi.is_data(flags):\n result += \"Address 0x%X is data.\\n\" % ea\n if idaapi.is_byte(flags):\n result += \"Data type: Byte\\n\"\n result += \"Value: %d\\n\" % idaapi.get_wide_byte(ea)\n elif idaapi.is_word(flags):\n result += \"Data type: Word\\n\"\n result += \"Value: %d\\n\" % idaapi.get_wide_word(ea)\n elif idaapi.is_dword(flags):\n result += \"Data type: Dword\\n\"\n result += \"Value: %d\\n\" % idaapi.get_wide_dword(ea)\n elif idaapi.is_qword(flags):\n result += \"Data type: Qword\\n\"\n result += \"Value: %d\\n\" % idaapi.get_qword(ea)\n elif idaapi.is_float(flags):\n result += \"Data type: Float\\n\"\n # result += \"Value: %f\\n\" % idaapi.get_wide_float(address)\n elif idaapi.is_double(flags):\n result += \"Data type: Double\\n\"\n # result += \"Value: %f\\n\" % idaapi.get_wide_double(address)\n elif idaapi.is_strlit(flags):\n result += \"Data type: String\\n\"\n result += \"Value: %s\\n\" % idaapi.get_strlit_contents(ea)\n elif idaapi.is_struct(flags):\n result += \"Data type: Struct\\n\"\n # ... 
其他数据类型检查\n elif idaapi.is_unknown(flags):\n result += \"Address 0x%X is unknown.\\n\" % ea\n\n # 名称和注释\n if idaapi.has_name(flags):\n result += \"Name: %s\\n\" % idaapi.get_name(ea)\n elif idaapi.has_dummy_name(flags):\n result += \"Dummy name: %s\\n\" % idaapi.get_name(ea)\n\n if idaapi.has_cmt(flags):\n result += \"Comment: %s\\n\" % idaapi.get_cmt(ea, 0)\n\n if result == '':\n result = 'Address not found.'\n elif result[-1] == '\\n':\n result = result[:-1]\n\n return result\n\n def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:\n query = core.escape_agent_input(\n query, 'get_address_info')\n\n return core.pop_async_call_result(\n idaapi.execute_sync(\n lambda: core.push_async_call_result(self.__get_address_info(query)),\n idaapi.MFF_WRITE))\n\n class __GetDefinitionTool(BaseTool):\n name = 'get_definition'\n description = ('Given a function name, show its definition. '\n 'NOTICE that the result is decompiled by IDA, so it may NOT be accurate. '\n '**Input Format**: `<function_name>`. '\n '**Input Example**: `sub_140007080`.')\n\n @staticmethod\n def __get_definition(function_name: str):\n try:\n return core.decompile_by_name(function_name).definition\n except Exception as e:\n return f'Failed to decompile: {e}'\n \n def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:\n query = core.escape_agent_input(query, 'get_definition')\n\n return core.pop_async_call_result(\n idaapi.execute_sync(\n lambda: core.push_async_call_result(self.__get_definition(query)), \n idaapi.MFF_WRITE))\n\n class __GetPseudocodeTool(BaseTool):\n name = 'get_pseudocode'\n description = ('Given a function name or hex address of a function, show its pseudocode. '\n 'NOTICE that the result is decompiled by IDA, so it may NOT be accurate. '\n '**Input Format**: `<function_name_or_hex_address>`. '\n '**Input Example1**: `sub_140007080`. '\n '**Input Example2**: `0x140007080`.')\n\n @staticmethod\n def __get_pseudocode(function_name_or_hex_address: str):\n try:\n if function_name_or_hex_address.lower().startswith('0x'):\n ea = int(function_name_or_hex_address, 16)\n return core.decompile_by_ea(ea).pseudocode\n\n return core.decompile_by_name(function_name_or_hex_address).pseudocode\n except Exception as e:\n return f'Failed to decompile: {e}'\n\n def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:\n query = core.escape_agent_input(\n query, 'get_pseudocode')\n\n return core.pop_async_call_result(\n idaapi.execute_sync(\n lambda: core.push_async_call_result(self.__get_pseudocode(query)),\n idaapi.MFF_WRITE))\n\n class __SetFunctionCommentTool(BaseTool):\n name = 'set_function_comment'\n description = ('Given a function name and a comment, set the comment of the function. '\n '**Input Format**: `<function_name> <comment>`. 
'\n '**Input Example**: `sub_140007080 Copilot Comment: This function is used to do something.`')\n\n @staticmethod\n def __set_function_comment(function_name_and_comment: str):\n try:\n func_name, comment = function_name_and_comment.split(' ', 1)\n func_name = func_name.strip()\n\n if not comment.startswith('Copilot Comment:'):\n comment = 'Copilot Comment: ' + comment.strip()\n core.decompile_by_name(func_name).comment = comment\n\n return f'Successfully set comment of {func_name} to {comment}.'\n except Exception as e:\n return f'Failed to set comment: {e}'\n\n def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:\n query = core.escape_agent_input(\n query, 'set_function_comment')\n\n return core.pop_async_call_result(\n idaapi.execute_sync(\n lambda: core.push_async_call_result(self.__set_function_comment(query)),\n idaapi.MFF_WRITE))\n\n class __SetFunctionDefinitionTool(BaseTool):\n name = 'set_function_definition'\n description = ('Set definition of a function. '\n '**Input Format**: `<return_type> [calling_convention] <function_name>(<param_type> [param_name], ...)`. '\n '**Input Example1**: `void sub_140005048(int a1, unsigned long long a2)`. '\n '**Input Example2**: `NTSTATUS __fastcall DriverIoControl(PDRIVER_OBJECT, PIRP)`.')\n\n @staticmethod\n def __set_function_definition(new_definition: str):\n func_pattern = re.compile(\n r'(?P<ret_type>[\\w\\s*]+?)\\s*(?P<cc>__\\w+\\s+)?(?P<func_name>\\w+)\\((?P<params>.*)\\)')\n # param_pattern = re.compile(r'(\\w+\\s*\\*?)\\s*(\\w+)')\n\n try:\n match = func_pattern.match(new_definition)\n if not match:\n return f'Invalid function definition, not match: {new_definition}'\n\n result = match.groupdict()\n return_type = result['ret_type'].strip() if result['ret_type'] else None\n if not return_type:\n return f'Invalid function definition, no return type: {new_definition}'\n\n # 上面的正则会漏掉一种情况\n # 例如,`NTSTATUSsub_140005048(PDRIVER_OBJECT driverObject, PIRP irp)`\n # 解析后,`ret_type`为`N`,`func_name`为`TSTATUSsub_140005048`\n # 因此我们要把这种输入列为无效输入\n if ' ' not in new_definition[:new_definition.index('(')]:\n return f'Invalid function definition, no func name: {new_definition}'\n\n func_name = result['func_name'].strip()\n core.decompile_by_name(func_name).definition = new_definition\n\n return f'Successfully set definition of {func_name} to {new_definition}.'\n except Exception as e:\n return f'Failed to set definition: {e}'\n\n def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:\n query = core.escape_agent_input(\n query, 'set_function_definition')\n\n return core.pop_async_call_result(\n idaapi.execute_sync(\n lambda: core.push_async_call_result(self.__set_function_definition(query)),\n idaapi.MFF_WRITE))\n\n class __SetFunctionNameTool(BaseTool):\n name = 'set_function_name'\n description = ('Given a function name, rename it. '\n '**Input Format**: <old_name> <new_name>. 
'\n '**Input Example**: sub_140007080 DeviceIoControl.')\n\n @staticmethod\n def __set_function_name(old_name_and_new_name: str):\n try:\n old_name, new_name = old_name_and_new_name.split(' ')\n old_name = old_name.strip()\n core.decompile_by_name(old_name).name = new_name\n\n return f'Successfully renamed {old_name} to {new_name}.'\n except Exception as e:\n return f'Failed to set function name: {e}'\n\n def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:\n return core.pop_async_call_result(\n idaapi.execute_sync(\n lambda: core.push_async_call_result(self.__set_function_name(query)),\n idaapi.MFF_WRITE))\n\n class __GetIsMyWorkDoneTool(BaseTool):\n name = 'get_is_my_work_done'\n description = ('Given a function name, return whether the work is done. '\n 'Also return tips if not done.')\n func: Optional[core.DecompiledFunction] = None\n\n def __init__(self, current_func_ea, **kwargs: Any):\n super().__init__(**kwargs)\n self.func = core.decompile_by_ea(current_func_ea)\n\n def __get_is_my_work_done(self):\n try:\n for function in self.func.functions:\n ea = function['ea']\n func_name = idaapi.get_func_name(ea)\n if func_name.startswith('sub_'):\n return (f'No, function `{func_name}` at 0x{ea:x} is not renamed yet. Please continue your work.'\n f'REMEMBER, your goal is to rename all functions that start with `sub_`.'\n f'AND, your are analyzing function `{self.func.name}`.')\n\n return f'Yes, function `{self.func.name}` is fully analyzed.'\n\n except Exception as e:\n return f'Failed to get is my work done: {e}'\n\n def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> Any:\n return core.pop_async_call_result(\n idaapi.execute_sync(\n lambda: core.push_async_call_result(self.__get_is_my_work_done()),\n idaapi.MFF_WRITE))"
}
] | import ida_hexrays
import ida_kernwin
import idaapi
from ida_copilot import panel
from ida_copilot.copilot import Copilot | 3,961 |
class CopilotPluginActionHandler(idaapi.action_handler_t):
def __init__(self):
super(CopilotPluginActionHandler, self).__init__()
def activate(self, ctx):
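# Run Copilot against the function under the cursor, keeping a modal wait box up until it finishes.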
ida_kernwin.show_wait_box('HIDECANCEL\nRunning Copilot...')
try:
Copilot().run()
finally:
ida_kernwin.hide_wait_box()
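# Refresh the pseudocode view so renames/comments applied by Copilot become visible.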
ida_hexrays.get_widget_vdui(ctx.widget).refresh_view(True)
ida_kernwin.refresh_idaview_anyway()
def on_task_complete(self, future):
# Close the progress bar or status message
ida_kernwin.hide_wait_box()
# Update the UI...
ida_kernwin.refresh_idaview_anyway()
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class CopilotPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Copilot"
help = "Copilot"
wanted_name = "Copilot"
wanted_hotkey = ""
def init(self):
if not ida_hexrays.init_hexrays_plugin():
print("Hex-Rays decompiler is not available!")
return
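# Register the "Run Copilot" action (Ctrl+Shift+P) and expose it under the Edit menu.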
run_action = idaapi.action_desc_t(
'copilot:run',
'Run Copilot',
CopilotPluginActionHandler(),
'Ctrl+Shift+P',
'Analyze the current function with Copilot',
-1)
idaapi.register_action(run_action)
idaapi.attach_action_to_menu(
'Edit/Copilot',
'copilot:run',
idaapi.SETMENU_APP)
action_desc = idaapi.action_desc_t(
'copilot:show_panel',
'Show Copilot',
|
class CopilotPluginActionHandler(idaapi.action_handler_t):
def __init__(self):
super(CopilotPluginActionHandler, self).__init__()
def activate(self, ctx):
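# Run Copilot against the function under the cursor, keeping a modal wait box up until it finishes.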
ida_kernwin.show_wait_box('HIDECANCEL\nRunning Copilot...')
try:
Copilot().run()
finally:
ida_kernwin.hide_wait_box()
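# Refresh the pseudocode view so renames/comments applied by Copilot become visible.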
ida_hexrays.get_widget_vdui(ctx.widget).refresh_view(True)
ida_kernwin.refresh_idaview_anyway()
def on_task_complete(self, future):
# Close the progress bar or status message
ida_kernwin.hide_wait_box()
# Update the UI...
ida_kernwin.refresh_idaview_anyway()
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class CopilotPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Copilot"
help = "Copilot"
wanted_name = "Copilot"
wanted_hotkey = ""
def init(self):
if not ida_hexrays.init_hexrays_plugin():
print("Hex-Rays decompiler is not available!")
return
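# Register the "Run Copilot" action (Ctrl+Shift+P) and expose it under the Edit menu.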
run_action = idaapi.action_desc_t(
'copilot:run',
'Run Copilot',
CopilotPluginActionHandler(),
'Ctrl+Shift+P',
'Analyze the current function with Copilot',
-1)
idaapi.register_action(run_action)
idaapi.attach_action_to_menu(
'Edit/Copilot',
'copilot:run',
idaapi.SETMENU_APP)
action_desc = idaapi.action_desc_t(
'copilot:show_panel',
'Show Copilot', | panel.ShowCopilotPanel(panel.CopilotPanel()), | 0 | 2023-11-02 14:23:11+00:00 | 8k |
WSH032/fastapi-proxy-lib | tests/test_ws.py | [
{
"identifier": "get_app",
"path": "tests/app/echo_ws_app.py",
"snippet": "def get_app() -> AppDataclass4Test: # noqa: C901, PLR0915\n \"\"\"Get the echo ws app.\n\n Returns:\n TestAppDataclass.\n \"\"\"\n app = FastAPI()\n request_dict = RequestDict(request=None)\n test_app_dataclass = AppDataclass4Test(app=app, request_dict=request_dict)\n\n @app.websocket_route(\"/echo_text\")\n async def echo_text(websocket: WebSocket):\n \"\"\"Websocket endpoint for echo text. Just receive text and send it back.\n\n Note: client must send text first.\n \"\"\"\n nonlocal test_app_dataclass\n test_app_dataclass.request_dict[\"request\"] = websocket\n\n await websocket.accept()\n while True:\n try:\n recev = await websocket.receive_text()\n await websocket.send_text(recev)\n except WebSocketDisconnect:\n break\n\n @app.websocket_route(\"/echo_bytes\")\n async def echo_bytes(websocket: WebSocket):\n \"\"\"Websocket endpoint for echo bytes. Just receive bytes and send it back.\n\n Note: client must send bytes first.\n \"\"\"\n nonlocal test_app_dataclass\n test_app_dataclass.request_dict[\"request\"] = websocket\n\n await websocket.accept()\n while True:\n try:\n recev = await websocket.receive_bytes()\n await websocket.send_bytes(recev)\n except WebSocketDisconnect:\n break\n\n @app.websocket_route(\"/accept_foo_subprotocol\")\n async def accept_foo_subprotocol(websocket: WebSocket):\n \"\"\"When client send subprotocols request, if subprotocols contain \"foo\", will accept it.\"\"\"\n nonlocal test_app_dataclass\n test_app_dataclass.request_dict[\"request\"] = websocket\n\n # https://asgi.readthedocs.io/en/latest/specs/www.html#websocket-connection-scope\n if \"foo\" in websocket.scope[\"subprotocols\"]:\n accepted_subprotocol = \"foo\"\n else:\n accepted_subprotocol = None\n\n await websocket.accept(subprotocol=accepted_subprotocol)\n\n await websocket.close()\n\n @app.websocket_route(\"/just_close_with_1001\")\n async def just_close_with_1001(websocket: WebSocket):\n \"\"\"Just do nothing after `accept`, then close ws with 1001 code.\"\"\"\n nonlocal test_app_dataclass\n test_app_dataclass.request_dict[\"request\"] = websocket\n\n await websocket.accept()\n await asyncio.sleep(0.3)\n await websocket.close(1001)\n\n @app.websocket_route(\"/reject_handshake\")\n async def reject_handshake(websocket: WebSocket):\n \"\"\"Will reject ws request by just calling `websocket.close()`.\"\"\"\n nonlocal test_app_dataclass\n test_app_dataclass.request_dict[\"request\"] = websocket\n\n await websocket.close()\n\n @app.websocket_route(\"/do_nothing\")\n async def do_nothing(websocket: WebSocket):\n \"\"\"Will do nothing except `websocket.accept()`.\"\"\"\n nonlocal test_app_dataclass\n test_app_dataclass.request_dict[\"request\"] = websocket\n\n await websocket.accept()\n\n return test_app_dataclass"
},
{
"identifier": "UvicornServer",
"path": "tests/app/tool.py",
"snippet": "class UvicornServer(uvicorn.Server):\n \"\"\"subclass of `uvicorn.Server` which can use AsyncContext to launch and shutdown automatically.\n\n Attributes:\n contx_server_task: The task of server.\n contx_socket: The socket of server.\n\n other attributes are same as `uvicorn.Server`:\n - config: The config arg that be passed in.\n ...\n \"\"\"\n\n _contx_server_task: Union[\"asyncio.Task[None]\", None]\n assert not hasattr(uvicorn.Server, \"_contx_server_task\")\n\n _contx_socket: Union[socket.socket, None]\n assert not hasattr(uvicorn.Server, \"_contx_socket\")\n\n _contx_server_started_event: Union[asyncio.Event, None]\n assert not hasattr(uvicorn.Server, \"_contx_server_started_event\")\n\n contx_exit_timeout: Union[int, float, None]\n assert not hasattr(uvicorn.Server, \"contx_exit_timeout\")\n\n @override\n def __init__(\n self, config: uvicorn.Config, contx_exit_timeout: Union[int, float, None] = None\n ) -> None:\n \"\"\"The same as `uvicorn.Server.__init__`.\"\"\"\n super().__init__(config=config)\n self._contx_server_task = None\n self._contx_socket = None\n self._contx_server_started_event = None\n self.contx_exit_timeout = contx_exit_timeout\n\n @override\n async def startup(self, sockets: Optional[List[socket.socket]] = None) -> None:\n \"\"\"The same as `uvicorn.Server.startup`.\"\"\"\n super_return = await super().startup(sockets=sockets)\n self.contx_server_started_event.set()\n return super_return\n\n @_no_override_uvicorn_server\n async def aenter(self) -> Self:\n \"\"\"Launch the server.\"\"\"\n # 在分配资源之前,先检查是否重入\n if self.contx_server_started_event.is_set():\n raise RuntimeError(\"DO not launch server by __aenter__ again!\")\n\n # FIXME: # 这个socket被设计为可被同一进程内的多个server共享,可能会引起潜在问题\n self._contx_socket = self.config.bind_socket()\n\n self._contx_server_task = asyncio.create_task(\n self.serve([self._contx_socket]), name=f\"Uvicorn Server Task of {self}\"\n )\n # 在 uvicorn.Server 的实现中,Server.serve() 内部会调用 Server.startup() 完成启动\n # 被覆盖的 self.startup() 会在完成时调用 self.contx_server_started_event.set()\n await self.contx_server_started_event.wait() # 等待服务器确实启动后才返回\n return self\n\n @_no_override_uvicorn_server\n async def __aenter__(self) -> Self:\n \"\"\"Launch the server.\n\n The same as `self.aenter()`.\n \"\"\"\n return await self.aenter()\n\n @_no_override_uvicorn_server\n async def aexit(\n self,\n contx_exit_timeout: Union[\n int, float, None, AeixtTimeoutUndefine\n ] = aexit_timeout_undefine,\n ) -> None:\n \"\"\"Shutdown the server.\"\"\"\n contx_server_task = self.contx_server_task\n contx_socket = self.contx_socket\n\n if isinstance(contx_exit_timeout, AeixtTimeoutUndefine):\n contx_exit_timeout = self.contx_exit_timeout\n\n # 在 uvicorn.Server 的实现中,设置 should_exit 可以使得 server 任务结束\n assert hasattr(self, \"should_exit\")\n self.should_exit = True\n\n try:\n await asyncio.wait_for(contx_server_task, timeout=contx_exit_timeout)\n except asyncio.TimeoutError:\n print(f\"{contx_server_task.get_name()} timeout!\")\n finally:\n # 其实uvicorn.Server会自动关闭socket,这里是为了保险起见\n contx_socket.close()\n\n @_no_override_uvicorn_server\n async def __aexit__(self, *_: Any, **__: Any) -> None:\n \"\"\"Shutdown the server.\n\n The same as `self.aexit()`.\n \"\"\"\n return await self.aexit()\n\n @property\n @_no_override_uvicorn_server\n def contx_server_started_event(self) -> asyncio.Event:\n \"\"\"The event that indicates the server has started.\n\n When first call the property, it will instantiate a `asyncio.Event()`to\n `self._contx_server_started_event`.\n\n Warn: This is a 
internal implementation detail, do not change the event manually.\n - please call the property in `self.aenter()` or `self.startup()` **first**.\n - **Never** call it outside of an async event loop first:\n https://stackoverflow.com/questions/53724665/using-queues-results-in-asyncio-exception-got-future-future-pending-attached\n \"\"\"\n if self._contx_server_started_event is None:\n self._contx_server_started_event = asyncio.Event()\n\n return self._contx_server_started_event\n\n @property\n @_no_override_uvicorn_server\n def contx_socket(self) -> socket.socket:\n \"\"\"The socket of server.\n\n Note: must call `self.__aenter__()` first.\n \"\"\"\n if self._contx_socket is None:\n raise RuntimeError(\"Please call `self.__aenter__()` first.\")\n else:\n return self._contx_socket\n\n @property\n @_no_override_uvicorn_server\n def contx_server_task(self) -> \"asyncio.Task[None]\":\n \"\"\"The task of server.\n\n Note: must call `self.__aenter__()` first.\n \"\"\"\n if self._contx_server_task is None:\n raise RuntimeError(\"Please call `self.__aenter__()` first.\")\n else:\n return self._contx_server_task\n\n @property\n @_no_override_uvicorn_server\n def contx_socket_getname(self) -> Any:\n \"\"\"Utils for calling self.contx_socket.getsockname().\n\n Return:\n refer to: https://docs.python.org/zh-cn/3/library/socket.html#socket-families\n \"\"\"\n return self.contx_socket.getsockname()\n\n @property\n @_no_override_uvicorn_server\n def contx_socket_url(self) -> httpx.URL:\n \"\"\"If server is tcp socket, return the url of server.\n\n Note: The path of url is explicitly set to \"/\".\n \"\"\"\n config = self.config\n if config.fd is not None or config.uds is not None:\n raise RuntimeError(\"Only support tcp socket.\")\n host, port = self.contx_socket_getname[:2]\n return httpx.URL(\n host=host,\n port=port,\n scheme=\"https\" if config.is_ssl else \"http\",\n path=\"/\",\n )"
},
{
"identifier": "UvicornServerFixture",
"path": "tests/conftest.py",
"snippet": "class UvicornServerFixture(Protocol): # noqa: D101\n def __call__( # noqa: D102\n self, config: uvicorn.Config, contx_exit_timeout: Union[int, float, None] = None\n ) -> Coroutine[None, None, UvicornServer]:\n ..."
},
{
"identifier": "AbstractTestProxy",
"path": "tests/tool.py",
"snippet": "class AbstractTestProxy(abc.ABC):\n \"\"\"Abstract class for testing proxy.\"\"\"\n\n @abc.abstractmethod\n def tool_4_test_fixture(self) -> Tool4TestFixture:\n \"\"\"Get the tool for test server.\"\"\""
},
{
"identifier": "Tool4TestFixture",
"path": "tests/tool.py",
"snippet": "class Tool4TestFixture:\n \"\"\"Tool for test server.\n\n Attributes:\n client_for_conn_to_target_server: The client for connecting to target server.\n client_for_conn_to_proxy_server: The client for connecting to proxy server.\n get_request: Get the latest original http/websocket request from the client.\n target_server_base_url: The base url of target server.\n proxy_server_base_url: The base url of proxy server.\n \"\"\"\n\n client_for_conn_to_target_server: httpx.AsyncClient\n client_for_conn_to_proxy_server: httpx.AsyncClient\n get_request: Callable[[], ServerRecvRequestsTypes]\n target_server_base_url: str\n proxy_server_base_url: str"
}
] | import asyncio
import httpx
import httpx_ws
import pytest
import uvicorn
from contextlib import AsyncExitStack
from multiprocessing import Process, Queue
from typing import Any, Dict, Literal, Optional
from fastapi_proxy_lib.fastapi.app import reverse_ws_app as get_reverse_ws_app
from httpx_ws import aconnect_ws
from starlette import websockets as starlette_websockets_module
from typing_extensions import override
from .app.echo_ws_app import get_app as get_ws_test_app
from .app.tool import UvicornServer
from .conftest import UvicornServerFixture
from .tool import (
AbstractTestProxy,
Tool4TestFixture,
) | 3,615 | # noqa: D100
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 0
DEFAULT_CONTX_EXIT_TIMEOUT = 5
# WS_BACKENDS_NEED_BE_TESTED = ("websockets", "wsproto")
# # FIXME: wsproto has problems, so it is not tested for now
# # ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host.
# # https://github.com/encode/uvicorn/discussions/2105
WS_BACKENDS_NEED_BE_TESTED = ("websockets",)
# https://www.python-httpx.org/advanced/#http-proxying
NO_PROXIES: Dict[Any, Any] = {"all://": None}
def _subprocess_run_echo_ws_uvicorn_server(queue: "Queue[str]", **kwargs: Any):
"""Run echo ws app in subprocess.
Args:
queue: The queue for subprocess to put the url of echo ws app.
After the server is started, the url will be put into the queue.
**kwargs: The kwargs for `uvicorn.Config`
"""
default_kwargs = {
"app": get_ws_test_app().app,
"port": DEFAULT_PORT,
"host": DEFAULT_HOST,
}
default_kwargs.update(kwargs)
target_ws_server = UvicornServer(
uvicorn.Config(**default_kwargs), # pyright: ignore[reportGeneralTypeIssues]
)
async def run():
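# Start the echo server, report its URL back to the parent process, then idle to keep the server alive.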
await target_ws_server.aenter()
url = str(target_ws_server.contx_socket_url)
queue.put(url)
queue.close()
while True: # run forever
await asyncio.sleep(0.1)
asyncio.run(run())
def _subprocess_run_httpx_ws(
queue: "Queue[str]",
kwargs_async_client: Optional[Dict[str, Any]] = None,
kwargs_aconnect_ws: Optional[Dict[str, Any]] = None,
):
"""Run aconnect_ws in subprocess.
Args:
queue: The queue the subprocess uses to signal that the ws connection has been established.
kwargs_async_client: The kwargs for `httpx.AsyncClient`
kwargs_aconnect_ws: The kwargs for `httpx_ws.aconnect_ws`
"""
kwargs_async_client = kwargs_async_client or {}
kwargs_aconnect_ws = kwargs_aconnect_ws or {}
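# "proxies" and "client" are overridden below, so drop any caller-supplied values first.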
kwargs_async_client.pop("proxies", None)
kwargs_aconnect_ws.pop("client", None)
async def run():
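# Open the websocket connection, then signal the parent process that the handshake succeeded.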
_exit_stack = AsyncExitStack()
_temp_client = httpx.AsyncClient(proxies=NO_PROXIES, **kwargs_async_client)
_ = await _exit_stack.enter_async_context(
aconnect_ws(
client=_temp_client,
**kwargs_aconnect_ws,
)
)
queue.put("done")
queue.close()
while True: # run forever
await asyncio.sleep(0.1)
asyncio.run(run())
class TestReverseWsProxy(AbstractTestProxy):
"""For testing reverse websocket proxy."""
@override
@pytest.fixture(params=WS_BACKENDS_NEED_BE_TESTED)
async def tool_4_test_fixture( # pyright: ignore[reportIncompatibleMethodOverride]
self,
| # noqa: D100
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 0
DEFAULT_CONTX_EXIT_TIMEOUT = 5
# WS_BACKENDS_NEED_BE_TESTED = ("websockets", "wsproto")
# # FIXME: wsproto has problems, so it is not tested for now
# # ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host.
# # https://github.com/encode/uvicorn/discussions/2105
WS_BACKENDS_NEED_BE_TESTED = ("websockets",)
# https://www.python-httpx.org/advanced/#http-proxying
NO_PROXIES: Dict[Any, Any] = {"all://": None}
def _subprocess_run_echo_ws_uvicorn_server(queue: "Queue[str]", **kwargs: Any):
"""Run echo ws app in subprocess.
Args:
queue: The queue for subprocess to put the url of echo ws app.
After the server is started, the url will be put into the queue.
**kwargs: The kwargs for `uvicorn.Config`
"""
default_kwargs = {
"app": get_ws_test_app().app,
"port": DEFAULT_PORT,
"host": DEFAULT_HOST,
}
default_kwargs.update(kwargs)
target_ws_server = UvicornServer(
uvicorn.Config(**default_kwargs), # pyright: ignore[reportGeneralTypeIssues]
)
async def run():
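# Start the echo server, report its URL back to the parent process, then idle to keep the server alive.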
await target_ws_server.aenter()
url = str(target_ws_server.contx_socket_url)
queue.put(url)
queue.close()
while True: # run forever
await asyncio.sleep(0.1)
asyncio.run(run())
def _subprocess_run_httpx_ws(
queue: "Queue[str]",
kwargs_async_client: Optional[Dict[str, Any]] = None,
kwargs_aconnect_ws: Optional[Dict[str, Any]] = None,
):
"""Run aconnect_ws in subprocess.
Args:
queue: The queue the subprocess uses to signal that the ws connection has been established.
kwargs_async_client: The kwargs for `httpx.AsyncClient`
kwargs_aconnect_ws: The kwargs for `httpx_ws.aconnect_ws`
"""
kwargs_async_client = kwargs_async_client or {}
kwargs_aconnect_ws = kwargs_aconnect_ws or {}
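# "proxies" and "client" are overridden below, so drop any caller-supplied values first.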
kwargs_async_client.pop("proxies", None)
kwargs_aconnect_ws.pop("client", None)
async def run():
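# Open the websocket connection, then signal the parent process that the handshake succeeded.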
_exit_stack = AsyncExitStack()
_temp_client = httpx.AsyncClient(proxies=NO_PROXIES, **kwargs_async_client)
_ = await _exit_stack.enter_async_context(
aconnect_ws(
client=_temp_client,
**kwargs_aconnect_ws,
)
)
queue.put("done")
queue.close()
while True: # run forever
await asyncio.sleep(0.1)
asyncio.run(run())
class TestReverseWsProxy(AbstractTestProxy):
"""For testing reverse websocket proxy."""
@override
@pytest.fixture(params=WS_BACKENDS_NEED_BE_TESTED)
async def tool_4_test_fixture( # pyright: ignore[reportIncompatibleMethodOverride]
self, | uvicorn_server_fixture: UvicornServerFixture, | 2 | 2023-11-08 04:38:36+00:00 | 8k |
aws-samples/amazon-location-geospatial-agent | geospatial_agent/agent/geospatial/solver/solver.py | [
{
"identifier": "NODE_TYPE_ATTRIBUTE",
"path": "geospatial_agent/agent/geospatial/solver/constants.py",
"snippet": "NODE_TYPE_ATTRIBUTE = \"node_type\""
},
{
"identifier": "NODE_TYPE_OPERATION",
"path": "geospatial_agent/agent/geospatial/solver/constants.py",
"snippet": "NODE_TYPE_OPERATION = \"operation\""
},
{
"identifier": "OperationsParser",
"path": "geospatial_agent/agent/geospatial/solver/op_graph.py",
"snippet": "class OperationsParser:\n def __init__(self, graph: networkx.DiGraph):\n self.graph = graph\n\n self.op_node_names = self._get_operation_node_names()\n self.operation_nodes = self._get_operation_nodes(self.op_node_names)\n self.output_node_names = self._get_output_node_names()\n self.input_node_names = self._get_input_node_names()\n\n def get_ancestors(self, node_name) -> Sequence[OperationNode]:\n ancestor_node_names = networkx.ancestors(self.graph, node_name)\n\n ancestor_operation_names = []\n for node_name in ancestor_node_names:\n if node_name in self.op_node_names:\n ancestor_operation_names.append(node_name)\n\n ancestor_operation_functions = []\n for op_node in self.operation_nodes:\n op_node_name = op_node.node_name\n if op_node_name in ancestor_operation_names:\n ancestor_operation_functions.append(op_node)\n\n return ancestor_operation_functions\n\n def get_descendants(self, node_name) -> Sequence[OperationNode]:\n descendant_operation_names = []\n descendant_node_names = networkx.descendants(self.graph, node_name)\n\n for descendant in descendant_node_names:\n if descendant in self.op_node_names:\n descendant_operation_names.append(descendant)\n\n descendant_operation_nodes = []\n for op_node in self.operation_nodes:\n op_name = op_node.node_name\n if op_name in descendant_operation_names:\n descendant_operation_nodes.append(op_node)\n\n return descendant_operation_nodes\n\n def stringify_nodes(self, nodes: Sequence[OperationNode]) -> str:\n \"\"\"Returns all operation nodes attributes stringified as a new line delimited string\"\"\"\n op_def_list = []\n for op_node in nodes:\n op_node_dict = op_node.__dict__\n op_def_list.append(str(op_node_dict))\n\n defs = '\\n'.join(op_def_list)\n return defs\n\n def _get_operation_nodes(self, op_node_names) -> Sequence[OperationNode]:\n op_nodes = []\n for op in op_node_names:\n node_dict = self.graph.nodes[op]\n\n node_type = node_dict[NODE_TYPE_ATTRIBUTE]\n if node_type != NODE_TYPE_OPERATION:\n raise OperationsParserException(f\"Node {op} is not an operation node\")\n\n function_def, param_names = self._get_func_def_str(op)\n\n successors = list(self.graph.successors(op))\n return_str = 'return ' + ', '.join(successors)\n\n op_node = OperationNode(\n function_definition=function_def,\n return_line=return_str,\n description=node_dict[NODE_DESCRIPTION_ATTRIBUTE],\n operation_type=node_dict.get(NODE_TYPE_OPERATION_TYPE, \"\"),\n node_name=op,\n param_names=param_names,\n return_names=set(successors)\n )\n\n op_nodes.append(op_node)\n return op_nodes\n\n def _get_operation_node_names(self):\n op_nodes = []\n for node_name in self.graph.nodes():\n node = self.graph.nodes[node_name]\n if node[NODE_TYPE_ATTRIBUTE] == NODE_TYPE_OPERATION:\n op_nodes.append(node_name)\n return op_nodes\n\n def _get_output_node_names(self):\n \"\"\"Returns output nodes from the graph. Output nodes have 'output' attribute set to True\"\"\"\n output_nodes = []\n for node_name in self.graph.nodes():\n node = self.graph.nodes[node_name]\n if len(list(self.graph.successors(node_name))) == 0:\n if node[NODE_TYPE_ATTRIBUTE] != NODE_TYPE_DATA:\n raise OperationsParserException(f\"Node {node_name} is not an {NODE_TYPE_DATA} node\")\n output_nodes.append(node_name)\n return output_nodes\n\n def _get_input_node_names(self):\n \"\"\"Returns input nodes from the graph. 
Input nodes have 'input' attribute set to True\"\"\"\n input_nodes = []\n for node_name in self.graph.nodes():\n node = self.graph.nodes[node_name]\n if len(list(self.graph.predecessors(node_name))) == 0:\n if node[NODE_TYPE_ATTRIBUTE] != NODE_TYPE_DATA:\n raise OperationsParserException(f\"Node {node_name} is not an {NODE_TYPE_DATA} node\")\n input_nodes.append(node_name)\n return input_nodes\n\n def _get_func_def_str(self, node):\n \"\"\"\n Returns function definition string with function name, parameters and default values of parameters.\n \"\"\"\n\n # INFO: To generate a function definition from the solution graph, we need to find the parameters of the\n # function, and the return value. We start with looking for the predecessors of the node.\n # Because the parameters are the predecessors.\n\n predecessors = self.graph.predecessors(node)\n\n param_default_str = ''\n param_str = ''\n param_names = set()\n\n for data_node in predecessors:\n param_node = self.graph.nodes[data_node]\n\n # INFO: The parameter node may have a data_path attribute specifying the location of its data,\n # like a URL or filepath, which should be used if present; otherwise the node name can be\n # used as the default parameter value.\n\n data_path = param_node.get(NODE_DATA_PATH_ATTRIBUTE, '')\n param_names.add(data_node)\n\n if data_path != \"\":\n param_default_str = param_default_str + f\"{data_node}='{data_path}', \"\n else:\n param_str = param_str + f\"{data_node}, \"\n\n all_parameters_str = param_str + param_default_str\n\n func_def = f'{node}({all_parameters_str})'\n func_def = func_def.replace(', )', ')')\n\n return func_def, param_names"
},
{
"identifier": "OperationNode",
"path": "geospatial_agent/agent/geospatial/solver/op_graph.py",
"snippet": "class OperationNode:\n def __init__(self,\n function_definition: str,\n return_line: str,\n description: str,\n node_name: str,\n param_names: set,\n return_names: set,\n operation_type: str = \"\",\n code_gen_response: str = \"\",\n operation_code: str = \"\",\n reviewed_code: str = \"\",\n operation_prompt: str = \"\"):\n self.function_definition = function_definition\n self.return_line = return_line\n self.description = description\n self.operation_type = operation_type\n self.node_name = node_name\n self.param_names = param_names\n self.return_names = return_names\n self.code_gen_response = code_gen_response\n self.operation_code = operation_code\n self.reviewed_code = reviewed_code\n self.operation_prompt = operation_prompt"
},
{
"identifier": "operation_code_gen_intro",
"path": "geospatial_agent/agent/geospatial/solver/prompts.py",
"snippet": ""
},
{
"identifier": "SIGNAL_OPERATION_CODE_GENERATED",
"path": "geospatial_agent/agent/shared.py",
"snippet": "SIGNAL_OPERATION_CODE_GENERATED = \"operation_code_generated\""
},
{
"identifier": "SENDER_GEOSPATIAL_AGENT",
"path": "geospatial_agent/agent/shared.py",
"snippet": "SENDER_GEOSPATIAL_AGENT = \"geospatial_agent\""
},
{
"identifier": "AgentSignal",
"path": "geospatial_agent/agent/shared.py",
"snippet": "class AgentSignal(BaseModel):\n id: str = Field(default_factory=lambda: uuid4().__str__())\n timestamp: str = Field(default_factory=lambda: datetime.now().isoformat())\n event_source: str = Field()\n event_message: str = Field()\n event_data: T = Field(default=None)\n event_type: EventType = Field(default=EventType.Message)\n is_final: bool = Field(default=False)"
},
{
"identifier": "EventType",
"path": "geospatial_agent/agent/shared.py",
"snippet": "class EventType(Enum):\n PythonCode = auto()\n Message = auto()\n Error = auto()"
},
{
"identifier": "SIGNAL_TAIL_CODE_GENERATED",
"path": "geospatial_agent/agent/shared.py",
"snippet": "SIGNAL_TAIL_CODE_GENERATED = \"tail_code_generated\""
},
{
"identifier": "HUMAN_ROLE",
"path": "geospatial_agent/shared/prompts.py",
"snippet": "HUMAN_ROLE = HUMAN_STOP_SEQUENCE"
},
{
"identifier": "ASSISTANT_ROLE",
"path": "geospatial_agent/shared/prompts.py",
"snippet": "ASSISTANT_ROLE = \"\\n\\nAssistant\""
},
{
"identifier": "HUMAN_STOP_SEQUENCE",
"path": "geospatial_agent/shared/prompts.py",
"snippet": "HUMAN_STOP_SEQUENCE = '\\n\\nHuman'"
},
{
"identifier": "get_shim_imports",
"path": "geospatial_agent/shared/shim.py",
"snippet": "def get_shim_imports() -> str:\n shim_map_style_import = f'from {location_map_style.__module__} import {location_map_style.__name__} \\n' \\\n f'from {get_data_file_url.__module__} import {get_data_file_url.__name__}\\n' \\\n f'from {get_local_file_path.__module__} import {get_local_file_path.__name__}\\n'\n return shim_map_style_import"
},
{
"identifier": "extract_code",
"path": "geospatial_agent/shared/utils.py",
"snippet": "def extract_code(response):\n \"\"\"Extract python code from LLM response.\"\"\"\n\n python_code_match = re.search(r\"```(?:python)?(.*?)```\", response, re.DOTALL)\n if python_code_match:\n python_code = python_code_match.group(1).strip()\n return python_code\n else:\n raise ExtractionException(\"Failed to extract python code from response\")"
},
{
"identifier": "extract_content_xml",
"path": "geospatial_agent/shared/utils.py",
"snippet": "def extract_content_xml(tag: str, response: str) -> str:\n pattern = f\"<{tag}>(.*?)<\\/{tag}>\"\n match = re.search(pattern, response, re.DOTALL)\n if match:\n return match.group(1).strip()\n else:\n raise ExtractionException(f\"Failed to extract {tag} from response\")"
}
] | import json
import networkx
from langchain import PromptTemplate, LLMChain
from langchain.llms.base import LLM
from pydispatch import dispatcher
from geospatial_agent.agent.geospatial.solver.constants import NODE_TYPE_ATTRIBUTE, NODE_TYPE_OPERATION
from geospatial_agent.agent.geospatial.solver.op_graph import OperationsParser, OperationNode
from geospatial_agent.agent.geospatial.solver.prompts import operation_code_gen_intro, \
operation_task_prefix, operation_reply_example, operation_code_gen_prompt_template, \
operation_pydeck_example, operation_requirement_gen_task_prefix, predefined_operation_requirements, \
shim_instructions
from geospatial_agent.agent.shared import SIGNAL_OPERATION_CODE_GENERATED, SENDER_GEOSPATIAL_AGENT, AgentSignal, \
EventType, SIGNAL_TAIL_CODE_GENERATED
from geospatial_agent.shared.prompts import HUMAN_ROLE, ASSISTANT_ROLE, HUMAN_STOP_SEQUENCE
from geospatial_agent.shared.shim import get_shim_imports
from geospatial_agent.shared.utils import extract_code, extract_content_xml
from typing import List | 3,869 | dispatcher.send(signal=SIGNAL_OPERATION_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"{idx + 1} / {len(op_nodes)}: Generated code for operation {op_node.node_name}",
event_data=operation_code_gen_output.operation_code,
event_type=EventType.PythonCode
))
# INFO: Updating Operation Nodes with generated code
op_node.operation_prompt = operation_code_gen_output.operation_prompt
op_node.code_gen_response = operation_code_gen_output.operation_code_gen_response
op_node.operation_code = operation_code_gen_output.operation_code
return op_nodes
def assemble(self):
output_node_names = self.operation_parser.output_node_names
operation_nodes = self.operation_parser.operation_nodes
# The head end of the code
head = ""
# The tail end of the code
tail = ""
reverse_graph = self.graph.reverse(copy=True)
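# Walk backwards from each output node, collecting operation definitions into `head` and their call lines into `tail`.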
for idx, output_node in enumerate(output_node_names):
bfs_edges = networkx.bfs_edges(reverse_graph, source=output_node)
for bfs_edge in bfs_edges:
from_node_name, _ = bfs_edge
current_nx_node = self.graph.nodes[from_node_name]
if current_nx_node.get(NODE_TYPE_ATTRIBUTE, None) == NODE_TYPE_OPERATION:
op_node: OperationNode = next(
(op_node for op_node in operation_nodes if op_node.node_name == from_node_name), None)
head = "\n" + op_node.operation_code + "\n" + head
tail = f'{", ".join(op_node.return_names)}={op_node.function_definition}\n' + tail
# Adding the session id and task name to the code
tail = f'\nsession_id = "{self.session_id}"\n' + \
f'task_name = "{self.task_name}"\n' + \
f'storage_mode = "{self.storage_mode}"\n' + \
tail
dispatcher.send(signal=SIGNAL_TAIL_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"Generated final code block.",
event_data=tail,
event_type=EventType.PythonCode
))
assembled_code = head + "\n" + tail
assembled_code = f'{get_shim_imports()}\n{assembled_code}'
return assembled_code
def get_operation_requirement(self, op_node: OperationNode) -> list[str]:
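# Ask the LLM for operation-specific requirements (returned as JSON) and prepend the shim instructions.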
node_name = op_node.node_name
task_def = self.task_def.strip("\n").strip()
op_properties = [
f'The function description is: {op_node.description}',
f'The type of work done in this function is: {op_node.operation_type}',
f'This function is one step to solve the question/task: {task_def}'
]
op_properties_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(op_properties)])
operation_requirement_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(predefined_operation_requirements)])
op_req_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(operation_requirement_gen_task_prefix)
chain = LLMChain(llm=self.llm, prompt=op_req_gen_prompt_template)
req_gen_response = chain.run(
human_role=HUMAN_ROLE,
operation_req_gen_intro=operation_code_gen_intro,
operation_name=node_name,
pre_requirements=operation_requirement_str,
operation_properties=op_properties_str,
assistant_role=ASSISTANT_ROLE,
stop=[HUMAN_STOP_SEQUENCE]
).strip()
operation_requirement_json = extract_content_xml("json", req_gen_response)
operation_requirement_list: List[str] = json.loads(operation_requirement_json)
operation_requirement_list = shim_instructions + operation_requirement_list
return operation_requirement_list
def gen_operation_code(self, op_node: OperationNode) -> OperationCodeGenOutput:
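# Build the code-generation prompt from this operation's requirements plus its ancestor/descendant context.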
operation_requirement_list = self.get_operation_requirement(op_node)
node_name = op_node.node_name
# Get ancestor operation functions. For operations that have ancestors, this also includes the LLM-
# generated code for those operations.
ancestor_op_nodes = self.operation_parser.get_ancestors(node_name)
ancestor_op_nodes_code = '\n'.join([op_node.operation_code for op_node in ancestor_op_nodes])
descendant_op_node = self.operation_parser.get_descendants(node_name)
descendant_op_node_defs = self.operation_parser.stringify_nodes(descendant_op_node)
pre_requirements = [
f'The function description is: {op_node.description}',
f'The function definition is: {op_node.function_definition}',
f'The function return line is: {op_node.return_line}'
]
operation_requirements_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(pre_requirements + operation_requirement_list)])
op_code_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(operation_code_gen_prompt_template)
op_code_gen_prompt = op_code_gen_prompt_template.format(
human_role=HUMAN_ROLE,
operation_code_gen_intro=operation_code_gen_intro,
|
class OperationCodeGenOutput:
def __init__(self,
operation_prompt: str,
operation_code_gen_response: str,
operation_code: str):
self.operation_prompt = operation_prompt
self.operation_code_gen_response = operation_code_gen_response
self.operation_code = operation_code
class InvalidStateError(Exception):
def __init__(self, message: str):
self.message = message
super().__init__(self.message)
class Solver:
def __init__(self,
llm: LLM,
graph: networkx.DiGraph,
graph_code: str,
session_id: str,
storage_mode: str,
task_definition: str,
task_name: str,
data_locations_instructions: str):
self.llm = llm
self.graph = graph
self.graph_code = graph_code
self.session_id = session_id
self.storage_mode = storage_mode
self.task_def = task_definition
self.task_name = task_name
self.data_locations_instructions = data_locations_instructions
self.operation_parser = OperationsParser(graph)
def solve(self):
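# Generate code for every operation node in the solution graph, emitting a signal as each one completes.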
op_nodes = self.operation_parser.operation_nodes
for idx, op_node in enumerate(op_nodes):
operation_code_gen_output = self.gen_operation_code(op_node)
dispatcher.send(signal=SIGNAL_OPERATION_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"{idx + 1} / {len(op_nodes)}: Generated code for operation {op_node.node_name}",
event_data=operation_code_gen_output.operation_code,
event_type=EventType.PythonCode
))
# INFO: Updating Operation Nodes with generated code
op_node.operation_prompt = operation_code_gen_output.operation_prompt
op_node.code_gen_response = operation_code_gen_output.operation_code_gen_response
op_node.operation_code = operation_code_gen_output.operation_code
return op_nodes
def assemble(self):
output_node_names = self.operation_parser.output_node_names
operation_nodes = self.operation_parser.operation_nodes
# The head end of the code
head = ""
# The tail end of the code
tail = ""
reverse_graph = self.graph.reverse(copy=True)
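# Walk backwards from each output node, collecting operation definitions into `head` and their call lines into `tail`.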
for idx, output_node in enumerate(output_node_names):
bfs_edges = networkx.bfs_edges(reverse_graph, source=output_node)
for bfs_edge in bfs_edges:
from_node_name, _ = bfs_edge
current_nx_node = self.graph.nodes[from_node_name]
if current_nx_node.get(NODE_TYPE_ATTRIBUTE, None) == NODE_TYPE_OPERATION:
op_node: OperationNode = next(
(op_node for op_node in operation_nodes if op_node.node_name == from_node_name), None)
head = "\n" + op_node.operation_code + "\n" + head
tail = f'{", ".join(op_node.return_names)}={op_node.function_definition}\n' + tail
# Adding the session id and task name to the code
tail = f'\nsession_id = "{self.session_id}"\n' + \
f'task_name = "{self.task_name}"\n' + \
f'storage_mode = "{self.storage_mode}"\n' + \
tail
dispatcher.send(signal=SIGNAL_TAIL_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"Generated final code block.",
event_data=tail,
event_type=EventType.PythonCode
))
assembled_code = head + "\n" + tail
assembled_code = f'{get_shim_imports()}\n{assembled_code}'
return assembled_code
def get_operation_requirement(self, op_node: OperationNode) -> list[str]:
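# Ask the LLM for operation-specific requirements (returned as JSON) and prepend the shim instructions.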
node_name = op_node.node_name
task_def = self.task_def.strip("\n").strip()
op_properties = [
f'The function description is: {op_node.description}',
f'The type of work done in this function is: {op_node.operation_type}',
f'This function is one step to solve the question/task: {task_def}'
]
op_properties_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(op_properties)])
operation_requirement_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(predefined_operation_requirements)])
op_req_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(operation_requirement_gen_task_prefix)
chain = LLMChain(llm=self.llm, prompt=op_req_gen_prompt_template)
req_gen_response = chain.run(
human_role=HUMAN_ROLE,
operation_req_gen_intro=operation_code_gen_intro,
operation_name=node_name,
pre_requirements=operation_requirement_str,
operation_properties=op_properties_str,
assistant_role=ASSISTANT_ROLE,
stop=[HUMAN_STOP_SEQUENCE]
).strip()
operation_requirement_json = extract_content_xml("json", req_gen_response)
operation_requirement_list: List[str] = json.loads(operation_requirement_json)
operation_requirement_list = shim_instructions + operation_requirement_list
return operation_requirement_list
def gen_operation_code(self, op_node: OperationNode) -> OperationCodeGenOutput:
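# Build the code-generation prompt from this operation's requirements plus its ancestor/descendant context.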
operation_requirement_list = self.get_operation_requirement(op_node)
node_name = op_node.node_name
# Get ancestor operation functions. For operations that have ancestors, this also includes the LLM-
# generated code for those operations.
ancestor_op_nodes = self.operation_parser.get_ancestors(node_name)
ancestor_op_nodes_code = '\n'.join([op_node.operation_code for op_node in ancestor_op_nodes])
descendant_op_node = self.operation_parser.get_descendants(node_name)
descendant_op_node_defs = self.operation_parser.stringify_nodes(descendant_op_node)
pre_requirements = [
f'The function description is: {op_node.description}',
f'The function definition is: {op_node.function_definition}',
f'The function return line is: {op_node.return_line}'
]
operation_requirements_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(pre_requirements + operation_requirement_list)])
op_code_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(operation_code_gen_prompt_template)
op_code_gen_prompt = op_code_gen_prompt_template.format(
human_role=HUMAN_ROLE,
operation_code_gen_intro=operation_code_gen_intro, | operation_task_prefix=operation_task_prefix, | 4 | 2023-11-09 18:29:25+00:00 | 8k |
sammysun0711/ov_llm_bench | inference_engine.py | [
{
"identifier": "OVQwenModel",
"path": "modeling.py",
"snippet": "class OVQwenModel(OVModelForCausalLM):\n def __init__(\n self,\n model: Model,\n config: PretrainedConfig = None,\n device: str = 'CPU',\n dynamic_shapes: bool = True,\n ov_config: Optional[Dict[str, str]] = None,\n model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,\n **kwargs,\n ):\n NormalizedConfigManager._conf['qwen'] = NormalizedTextConfig.with_args(\n num_layers='num_layers', num_attention_heads='num_attention_heads')\n super().__init__(model, config, device, dynamic_shapes,\n ov_config, model_save_dir, **kwargs)\n\n def _reshape(\n self,\n model: Model,\n batch_size: int,\n sequence_length: int,\n height: int = None,\n width: int = None,\n ):\n shapes = {}\n for inputs in model.inputs:\n shapes[inputs] = inputs.get_partial_shape()\n shapes[inputs][0] = -1\n shapes[inputs][1] = -1\n model.reshape(shapes)\n return model\n\n @classmethod\n def _from_pretrained(\n cls,\n model_id: Union[str, Path],\n config: PretrainedConfig,\n use_auth_token: Optional[Union[bool, str, None]] = None,\n revision: Optional[Union[str, None]] = None,\n force_download: bool = False,\n cache_dir: Optional[str] = None,\n file_name: Optional[str] = None,\n subfolder: str = \"\",\n from_onnx: bool = False,\n local_files_only: bool = False,\n load_in_8bit: bool = False,\n **kwargs,\n ):\n model_path = Path(model_id)\n default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME\n file_name = file_name or default_file_name\n\n model_cache_path = cls._cached_file(\n model_path=model_path,\n use_auth_token=use_auth_token,\n revision=revision,\n force_download=force_download,\n cache_dir=cache_dir,\n file_name=file_name,\n subfolder=subfolder,\n local_files_only=local_files_only,\n )\n\n model = cls.load_model(model_cache_path, load_in_8bit=load_in_8bit)\n init_cls = OVQwenModel\n\n return init_cls(model=model, config=config, model_save_dir=model_cache_path.parent, **kwargs)\n\n def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):\n past_key_values = past_key_values or kwargs.get(\"past\", None)\n\n # `past_key_values` may be in the stardard format (e.g. 
in contrastive search), converts to bloom's format if needed\n if past_key_values is not None and self.config.model_type == \"bloom\":\n if past_key_values[0][0].shape[0] == input_ids.shape[0]:\n past_key_values = self._convert_to_bloom_cache(past_key_values)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": self.use_cache,\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": None,\n }\n\n def _update_model_kwargs_for_generation(\n self,\n outputs: \"ModelOutput\",\n model_kwargs: Dict[str, \"Any\"],\n is_encoder_decoder: bool = False,\n standardize_cache_format: bool = False,\n ) -> Dict[str, \"Any\"]:\n # update past_key_values\n model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n outputs, standardize_cache_format=standardize_cache_format\n )\n\n # update attention mask\n if \"attention_mask\" in model_kwargs:\n attention_mask = model_kwargs[\"attention_mask\"]\n model_kwargs[\"attention_mask\"] = torch.cat(\n [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n )\n\n # update position ids\n if \"position_ids\" in model_kwargs:\n position_ids = model_kwargs[\"position_ids\"]\n new_position_id = position_ids[..., -1:].clone()\n new_position_id += 1\n model_kwargs[\"position_ids\"] = torch.cat([position_ids, new_position_id], dim=-1)\n\n model_kwargs[\"is_first_forward\"] = False\n return model_kwargs\n\n\n def generate(\n self,\n inputs: Optional[torch.Tensor] = None,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[\n Callable[[int, torch.Tensor], List[int]]\n ] = None,\n synced_gpus: Optional[bool] = None,\n #assistant_model: Optional[\"PreTrainedModel\"] = None,\n #streamer: Optional[\"BaseStreamer\"] = None,\n **kwargs,\n ) -> Union[GenerateOutput, torch.LongTensor]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n\n # Process stop_words_ids.\n stop_words_ids = kwargs.pop(\"stop_words_ids\", [[151643]])\n if stop_words_ids is None and generation_config is not None:\n stop_words_ids = getattr(generation_config, \"stop_words_ids\", None)\n if stop_words_ids is None:\n stop_words_ids = getattr(generation_config, \"stop_words_ids\", None)\n\n if stop_words_ids is not None:\n stop_words_logits_processor = StopWordsLogitsProcessor(\n stop_words_ids=stop_words_ids,\n eos_token_id=generation_config.eos_token_id,\n )\n if logits_processor is None:\n logits_processor = LogitsProcessorList([stop_words_logits_processor])\n else:\n logits_processor.append(stop_words_logits_processor)\n\n return super().generate(\n inputs,\n generation_config=generation_config,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n synced_gpus=synced_gpus,\n **kwargs,\n )"
},
{
"identifier": "OVChatGLM2Model",
"path": "modeling.py",
"snippet": "class OVChatGLM2Model(OVModelForCausalLM):\n def __init__(\n self,\n model: Model,\n config: PretrainedConfig = None,\n device: str = 'CPU',\n dynamic_shapes: bool = True,\n ov_config: Optional[Dict[str, str]] = None,\n model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,\n **kwargs,\n ):\n NormalizedConfigManager._conf['chatglm'] = NormalizedTextConfig.with_args(\n num_layers='num_layers', num_attention_heads='num_attention_heads')\n super().__init__(model, config, device, dynamic_shapes,\n ov_config, model_save_dir, **kwargs)\n\n def _reshape(\n self,\n model: Model,\n batch_size: int,\n sequence_length: int,\n height: int = None,\n width: int = None,\n ):\n shapes = {}\n for inputs in model.inputs:\n shapes[inputs] = inputs.get_partial_shape()\n shapes[inputs][0] = -1\n input_name = inputs.get_any_name()\n if input_name.startswith('past_key_values'):\n shapes[inputs][1] = -1\n shapes[inputs][2] = 2\n else:\n shapes[inputs][1] = -1\n model.reshape(shapes)\n return model\n\n def forward(\n self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.LongTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n position_ids: Optional[torch.LongTensor] = None,\n **kwargs,\n ) -> CausalLMOutputWithPast:\n self.compile()\n\n if self.use_cache and past_key_values is not None:\n input_ids = input_ids[:, -1:]\n\n inputs = {}\n if past_key_values is not None:\n if self._pkv_precision == Type.bf16:\n # numpy does not support bf16, pretending f16, should change to bf16\n past_key_values = tuple(\n Tensor(past_key_value, past_key_value.shape, Type.bf16) for pkv_per_layer in past_key_values for past_key_value in pkv_per_layer\n )\n else:\n # Flatten the past_key_values\n past_key_values = tuple(\n past_key_value for pkv_per_layer in past_key_values for past_key_value in pkv_per_layer)\n # Add the past_key_values to the decoder inputs\n inputs = dict(zip(self.key_value_input_names, past_key_values))\n\n # Create empty past_key_values for decoder_with_past first generation step\n elif self.use_cache:\n for input_name in self.key_value_input_names:\n model_inputs = self.model.input(input_name)\n shape = model_inputs.get_partial_shape()\n shape[0] = 0\n if shape[1].is_dynamic:\n shape[1] = 1\n inputs[input_name] = Tensor(\n model_inputs.get_element_type(), shape.get_shape())\n\n inputs['input_ids'] = np.array(input_ids)\n\n if 'position_ids' in self.input_names and position_ids is not None:\n inputs['position_ids'] = np.array(position_ids)\n\n # Add the attention_mask inputs when needed\n if 'attention_mask' in self.input_names and attention_mask is not None:\n inputs['attention_mask'] = np.array(attention_mask)\n\n # Run inference\n self.request.start_async(inputs, share_inputs=True)\n self.request.wait()\n\n logits = torch.from_numpy(\n self.request.get_tensor('logits').data).to(self.device)\n\n if self.use_cache:\n # Tuple of length equal to : number of layer * number of past_key_value per decoder layer (2 corresponds to the self-attention layer)\n past_key_values = tuple(self.request.get_tensor(\n key).data for key in self.key_value_output_names)\n # Tuple of tuple of length `n_layers`, with each tuple of length equal to 2 (k/v of self-attention)\n past_key_values = tuple(\n past_key_values[i: i + self.num_pkv] for i in range(0, len(past_key_values), self.num_pkv))\n else:\n past_key_values = None\n\n return CausalLMOutputWithPast(logits=logits, past_key_values=past_key_values)\n\n def get_position_ids(self, input_ids, device):\n 
batch_size, seq_length = input_ids.shape\n position_ids = torch.arange(\n seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)\n return position_ids\n\n def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):\n past_key_values = past_key_values or kwargs.get(\"past\", None)\n\n # `past_key_values` may be in the stardard format (e.g. in contrastive search), converts to bloom's format if needed\n if past_key_values is not None and self.config.model_type == \"bloom\":\n if past_key_values[0][0].shape[0] == input_ids.shape[0]:\n past_key_values = self._convert_to_bloom_cache(past_key_values)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": self.use_cache,\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": None,\n }\n\n def _update_model_kwargs_for_generation(\n self,\n outputs: \"ModelOutput\",\n model_kwargs: Dict[str, \"Any\"],\n is_encoder_decoder: bool = False,\n standardize_cache_format: bool = False,\n ) -> Dict[str, \"Any\"]:\n # update past_key_values\n model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n outputs, standardize_cache_format=standardize_cache_format\n )\n\n # update attention mask\n if \"attention_mask\" in model_kwargs:\n attention_mask = model_kwargs[\"attention_mask\"]\n model_kwargs[\"attention_mask\"] = torch.cat(\n [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n )\n\n # update position ids\n if \"position_ids\" in model_kwargs:\n position_ids = model_kwargs[\"position_ids\"]\n new_position_id = position_ids[..., -1:].clone()\n new_position_id += 1\n model_kwargs[\"position_ids\"] = torch.cat(\n [position_ids, new_position_id], dim=-1)\n\n model_kwargs[\"is_first_forward\"] = False\n return model_kwargs\n\n @classmethod\n def _from_pretrained(\n cls,\n model_id: Union[str, Path],\n config: PretrainedConfig,\n use_auth_token: Optional[Union[bool, str, None]] = None,\n revision: Optional[Union[str, None]] = None,\n force_download: bool = False,\n cache_dir: Optional[str] = None,\n file_name: Optional[str] = None,\n subfolder: str = \"\",\n from_onnx: bool = False,\n local_files_only: bool = False,\n load_in_8bit: bool = False,\n **kwargs,\n ):\n model_path = Path(model_id)\n default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME\n file_name = file_name or default_file_name\n\n model_cache_path = cls._cached_file(\n model_path=model_path,\n use_auth_token=use_auth_token,\n revision=revision,\n force_download=force_download,\n cache_dir=cache_dir,\n file_name=file_name,\n subfolder=subfolder,\n local_files_only=local_files_only,\n )\n\n model = cls.load_model(model_cache_path, load_in_8bit=load_in_8bit)\n init_cls = OVChatGLM2Model\n\n return init_cls(model=model, config=config, model_save_dir=model_cache_path.parent, **kwargs)"
},
{
"identifier": "print_perf_counters_sort",
"path": "utils.py",
"snippet": "def print_perf_counters_sort(perf_counts_list,sort_flag=\"sort\"):\n \"\"\" Print opts time cost and can be sorted according by each opts time cost\n \"\"\"\n for ni in range(len(perf_counts_list)):\n perf_counts = perf_counts_list[ni]\n total_time = timedelta()\n total_time_cpu = timedelta()\n print(f\"Performance counts sorted for {ni}-th infer request\")\n for pi in perf_counts:\n total_time += pi.real_time\n total_time_cpu += pi.cpu_time\n\n total_time = total_time.microseconds\n total_time_cpu = total_time_cpu.microseconds\n total_real_time_proportion = 0\n total_detail_data=[]\n for pi in perf_counts:\n node_name = pi.node_name\n layerStatus = pi.status\n layerType = pi.node_type\n real_time = pi.real_time.microseconds\n cpu_time = pi.cpu_time.microseconds\n real_proportion = round(real_time/total_time,4)\n execType = pi.exec_type\n tmp_data=[node_name,layerStatus,layerType,real_time,cpu_time,real_proportion,execType]\n total_detail_data.append(tmp_data)\n total_real_time_proportion += real_proportion\n total_detail_data = np.array(total_detail_data)\n if sort_flag==\"sort\":\n total_detail_data = sorted(total_detail_data,key=lambda tmp_data:tmp_data[-4],reverse=True)\n elif sort_flag==\"no_sort\":\n total_detail_data = total_detail_data\n elif sort_flag==\"simple_sort\":\n total_detail_data = sorted(total_detail_data,key=lambda tmp_data:tmp_data[-4],reverse=True)\n total_detail_data = [tmp_data for tmp_data in total_detail_data if str(tmp_data[1])!=\"Status.NOT_RUN\"]\n print_detail_result(total_detail_data)\n print(f'Total time: {total_time / 1000:.3f} milliseconds')\n print(f'Total CPU time: {total_time_cpu / 1000:.3f} milliseconds')\n print(f'Total proportion: {\"%.2f\"%(round(total_real_time_proportion)*100)} % \\n')\n return total_detail_data"
}
] | import time
import gc
import numpy as np
from transformers import AutoTokenizer, TextIteratorStreamer, AutoConfig, GenerationConfig
from optimum.intel.openvino import OVModelForCausalLM
from threading import Thread, Event
from time import perf_counter
from typing import List
from modeling import OVQwenModel, OVChatGLM2Model
from utils import print_perf_counters_sort | 4,455 |
"""
from utils import MemConsumption
mem_consumption = MemConsumption()
max_rss_mem_consumption = ''
max_shared_mem_consumption = ''
"""
class InferenceEngine:
def __init__(self, args=None, ov_config=None):
self.args = args
self.config = AutoConfig.from_pretrained(
self.args.model_id, trust_remote_code=True)
s = time.time()
if self.config.model_type == "llama":
print("Loading Llama2 model")
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.model_id, trust_remote_code=True)
self.ov_model = OVModelForCausalLM.from_pretrained(self.args.model_id,
compile=False,
device=self.args.device,
ov_config=ov_config,
trust_remote_code=True)
elif self.config.model_type == "chatglm":
print("Loading ChatGLM2 model")
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.model_id,
padding_side='left',
trust_remote_code=True)
|
"""
from utils import MemConsumption
mem_consumption = MemConsumption()
max_rss_mem_consumption = ''
max_shared_mem_consumption = ''
"""
class InferenceEngine:
def __init__(self, args=None, ov_config=None):
self.args = args
self.config = AutoConfig.from_pretrained(
self.args.model_id, trust_remote_code=True)
s = time.time()
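        # Model loading below is dispatched on config.model_type: the "llama" branch uses the stock
        # OVModelForCausalLM, while ChatGLM2/Qwen checkpoints rely on the custom OV wrappers
        # imported from modeling.py.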
if self.config.model_type == "llama":
print("Loading Llama2 model")
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.model_id, trust_remote_code=True)
self.ov_model = OVModelForCausalLM.from_pretrained(self.args.model_id,
compile=False,
device=self.args.device,
ov_config=ov_config,
trust_remote_code=True)
elif self.config.model_type == "chatglm":
print("Loading ChatGLM2 model")
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.model_id,
padding_side='left',
trust_remote_code=True) | self.ov_model = OVChatGLM2Model.from_pretrained(self.args.model_id, | 1 | 2023-11-08 02:09:04+00:00 | 8k |
Rishit-dagli/Astroformer | pytorch-image-models/timm/models/efficientvit_mit.py | [
{
"identifier": "build_model_with_cfg",
"path": "pytorch-image-models/timm/models/_builder.py",
"snippet": "def build_model_with_cfg(\n model_cls: Callable,\n variant: str,\n pretrained: bool,\n pretrained_cfg: Optional[Dict] = None,\n pretrained_cfg_overlay: Optional[Dict] = None,\n model_cfg: Optional[Any] = None,\n feature_cfg: Optional[Dict] = None,\n pretrained_strict: bool = True,\n pretrained_filter_fn: Optional[Callable] = None,\n kwargs_filter: Optional[Tuple[str]] = None,\n **kwargs,\n):\n \"\"\" Build model with specified default_cfg and optional model_cfg\n\n This helper fn aids in the construction of a model including:\n * handling default_cfg and associated pretrained weight loading\n * passing through optional model_cfg for models with config based arch spec\n * features_only model adaptation\n * pruning config / model adaptation\n\n Args:\n model_cls (nn.Module): model class\n variant (str): model variant name\n pretrained (bool): load pretrained weights\n pretrained_cfg (dict): model's pretrained weight/task config\n model_cfg (Optional[Dict]): model's architecture config\n feature_cfg (Optional[Dict]: feature extraction adapter config\n pretrained_strict (bool): load pretrained weights strictly\n pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights\n kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model\n **kwargs: model args passed through to model __init__\n \"\"\"\n pruned = kwargs.pop('pruned', False)\n features = False\n feature_cfg = feature_cfg or {}\n\n # resolve and update model pretrained config and model kwargs\n pretrained_cfg = resolve_pretrained_cfg(\n variant,\n pretrained_cfg=pretrained_cfg,\n pretrained_cfg_overlay=pretrained_cfg_overlay\n )\n\n # FIXME converting back to dict, PretrainedCfg use should be propagated further, but not into model\n pretrained_cfg = pretrained_cfg.to_dict()\n\n _update_default_kwargs(pretrained_cfg, kwargs, kwargs_filter)\n\n # Setup for feature extraction wrapper done at end of this fn\n if kwargs.pop('features_only', False):\n features = True\n feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))\n if 'out_indices' in kwargs:\n feature_cfg['out_indices'] = kwargs.pop('out_indices')\n\n # Instantiate the model\n if model_cfg is None:\n model = model_cls(**kwargs)\n else:\n model = model_cls(cfg=model_cfg, **kwargs)\n model.pretrained_cfg = pretrained_cfg\n model.default_cfg = model.pretrained_cfg # alias for backwards compat\n\n if pruned:\n model = adapt_model_from_file(model, variant)\n\n # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats\n num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))\n if pretrained:\n load_pretrained(\n model,\n pretrained_cfg=pretrained_cfg,\n num_classes=num_classes_pretrained,\n in_chans=kwargs.get('in_chans', 3),\n filter_fn=pretrained_filter_fn,\n strict=pretrained_strict,\n )\n\n # Wrap the model in a feature extraction module if enabled\n if features:\n feature_cls = FeatureListNet\n output_fmt = getattr(model, 'output_fmt', None)\n if output_fmt is not None:\n feature_cfg.setdefault('output_fmt', output_fmt)\n if 'feature_cls' in feature_cfg:\n feature_cls = feature_cfg.pop('feature_cls')\n if isinstance(feature_cls, str):\n feature_cls = feature_cls.lower()\n if 'hook' in feature_cls:\n feature_cls = FeatureHookNet\n elif feature_cls == 'fx':\n feature_cls = FeatureGraphNet\n else:\n assert False, f'Unknown feature class {feature_cls}'\n model = feature_cls(model, **feature_cfg)\n model.pretrained_cfg = 
pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg\n model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg)\n\n return model"
},
{
"identifier": "register_notrace_module",
"path": "pytorch-image-models/timm/models/_features_fx.py",
"snippet": "def register_notrace_module(module: Type[nn.Module]):\n \"\"\"\n Any module not under timm.models.layers should get this decorator if we don't want to trace through it.\n \"\"\"\n _leaf_modules.add(module)\n return module"
},
{
"identifier": "checkpoint_seq",
"path": "pytorch-image-models/timm/models/_manipulate.py",
"snippet": "def checkpoint_seq(\n functions,\n x,\n every=1,\n flatten=False,\n skip_last=False,\n preserve_rng_state=True\n):\n r\"\"\"A helper function for checkpointing sequential models.\n\n Sequential models execute a list of modules/functions in order\n (sequentially). Therefore, we can divide such a sequence into segments\n and checkpoint each segment. All segments except run in :func:`torch.no_grad`\n manner, i.e., not storing the intermediate activations. The inputs of each\n checkpointed segment will be saved for re-running the segment in the backward pass.\n\n See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.\n\n .. warning::\n Checkpointing currently only supports :func:`torch.autograd.backward`\n and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`\n is not supported.\n\n .. warning:\n At least one of the inputs needs to have :code:`requires_grad=True` if\n grads are needed for model inputs, otherwise the checkpointed part of the\n model won't have gradients.\n\n Args:\n functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.\n x: A Tensor that is input to :attr:`functions`\n every: checkpoint every-n functions (default: 1)\n flatten (bool): flatten nn.Sequential of nn.Sequentials\n skip_last (bool): skip checkpointing the last function in the sequence if True\n preserve_rng_state (bool, optional, default=True): Omit stashing and restoring\n the RNG state during each checkpoint.\n\n Returns:\n Output of running :attr:`functions` sequentially on :attr:`*inputs`\n\n Example:\n >>> model = nn.Sequential(...)\n >>> input_var = checkpoint_seq(model, input_var, every=2)\n \"\"\"\n def run_function(start, end, functions):\n def forward(_x):\n for j in range(start, end + 1):\n _x = functions[j](_x)\n return _x\n return forward\n\n if isinstance(functions, torch.nn.Sequential):\n functions = functions.children()\n if flatten:\n functions = chain.from_iterable(functions)\n if not isinstance(functions, (tuple, list)):\n functions = tuple(functions)\n\n num_checkpointed = len(functions)\n if skip_last:\n num_checkpointed -= 1\n end = -1\n for start in range(0, num_checkpointed, every):\n end = min(start + every - 1, num_checkpointed - 1)\n x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)\n if skip_last:\n return run_function(end + 1, len(functions) - 1, functions)(x)\n return x"
},
{
"identifier": "register_model",
"path": "pytorch-image-models/timm/models/_registry.py",
"snippet": "def register_model(fn: Callable[..., Any]) -> Callable[..., Any]:\n # lookup containing module\n mod = sys.modules[fn.__module__]\n module_name_split = fn.__module__.split('.')\n module_name = module_name_split[-1] if len(module_name_split) else ''\n\n # add model to __all__ in module\n model_name = fn.__name__\n if hasattr(mod, '__all__'):\n mod.__all__.append(model_name)\n else:\n mod.__all__ = [model_name] # type: ignore\n\n # add entries to registry dict/sets\n _model_entrypoints[model_name] = fn\n _model_to_module[model_name] = module_name\n _module_to_models[module_name].add(model_name)\n if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:\n # this will catch all models that have entrypoint matching cfg key, but miss any aliasing\n # entrypoints or non-matching combos\n default_cfg = mod.default_cfgs[model_name]\n if not isinstance(default_cfg, DefaultCfg):\n # new style default cfg dataclass w/ multiple entries per model-arch\n assert isinstance(default_cfg, dict)\n # old style cfg dict per model-arch\n pretrained_cfg = PretrainedCfg(**default_cfg)\n default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})\n\n for tag_idx, tag in enumerate(default_cfg.tags):\n is_default = tag_idx == 0\n pretrained_cfg = default_cfg.cfgs[tag]\n model_name_tag = '.'.join([model_name, tag]) if tag else model_name\n replace_items = dict(architecture=model_name, tag=tag if tag else None)\n if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':\n # auto-complete hub name w/ architecture.tag\n replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag\n pretrained_cfg = replace(pretrained_cfg, **replace_items)\n\n if is_default:\n _model_pretrained_cfgs[model_name] = pretrained_cfg\n if pretrained_cfg.has_weights:\n # add tagless entry if it's default and has weights\n _model_has_pretrained.add(model_name)\n\n if tag:\n _model_pretrained_cfgs[model_name_tag] = pretrained_cfg\n if pretrained_cfg.has_weights:\n # add model w/ tag if tag is valid\n _model_has_pretrained.add(model_name_tag)\n _model_with_tags[model_name].append(model_name_tag)\n else:\n _model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances)\n\n _model_default_cfgs[model_name] = default_cfg\n\n return fn"
},
{
"identifier": "generate_default_cfgs",
"path": "pytorch-image-models/timm/models/_registry.py",
"snippet": "def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]):\n out = defaultdict(DefaultCfg)\n default_set = set() # no tag and tags ending with * are prioritized as default\n\n for k, v in cfgs.items():\n if isinstance(v, dict):\n v = PretrainedCfg(**v)\n has_weights = v.has_weights\n\n model, tag = split_model_name_tag(k)\n is_default_set = model in default_set\n priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set)\n tag = tag.strip('*')\n\n default_cfg = out[model]\n\n if priority:\n default_cfg.tags.appendleft(tag)\n default_set.add(model)\n elif has_weights and not default_cfg.is_pretrained:\n default_cfg.tags.appendleft(tag)\n else:\n default_cfg.tags.append(tag)\n\n if has_weights:\n default_cfg.is_pretrained = True\n\n default_cfg.cfgs[tag] = v\n\n return out"
}
] | from typing import Optional
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, create_conv2d
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
import torch
import torch.nn as nn
import torch.nn.functional as F | 4,793 | super(ClassifierHead, self).__init__()
self.in_conv = ConvNormAct(in_channels, widths[0], 1, norm_layer=norm_layer, act_layer=act_layer)
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True, input_fmt='NCHW')
self.classifier = nn.Sequential(
nn.Linear(widths[0], widths[1], bias=False),
nn.LayerNorm(widths[1]),
act_layer(inplace=True),
nn.Dropout(dropout, inplace=False),
nn.Linear(widths[1], n_classes, bias=True),
)
def forward(self, x, pre_logits: bool = False):
x = self.in_conv(x)
x = self.global_pool(x)
if pre_logits:
return x
x = self.classifier(x)
return x
class EfficientVit(nn.Module):
def __init__(
self,
in_chans=3,
widths=(),
depths=(),
head_dim=32,
expand_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
global_pool='avg',
head_widths=(),
drop_rate=0.0,
num_classes=1000,
):
super(EfficientVit, self).__init__()
self.grad_checkpointing = False
self.global_pool = global_pool
self.num_classes = num_classes
# input stem
self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer)
stride = self.stem.stride
# stages
self.feature_info = []
self.stages = nn.Sequential()
in_channels = widths[0]
for i, (w, d) in enumerate(zip(widths[1:], depths[1:])):
self.stages.append(EfficientVitStage(
in_channels,
w,
depth=d,
norm_layer=norm_layer,
act_layer=act_layer,
expand_ratio=expand_ratio,
head_dim=head_dim,
vit_stage=i >= 2,
))
stride *= 2
in_channels = w
self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')]
self.num_features = in_channels
self.head_widths = head_widths
self.head_dropout = drop_rate
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
self.head = nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.classifier[-1]
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True)
else:
self.head = nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
| """ EfficientViT (by MIT Song Han's Lab)
Paper: `Efficientvit: Enhanced linear attention for high-resolution low-computation visual recognition`
- https://arxiv.org/abs/2205.14756
Adapted from official impl at https://github.com/mit-han-lab/efficientvit
"""
__all__ = ['EfficientVit']
def val2list(x: list or tuple or any, repeat_time=1):
if isinstance(x, (list, tuple)):
return list(x)
return [x for _ in range(repeat_time)]
def val2tuple(x: list or tuple or any, min_len: int = 1, idx_repeat: int = -1):
# repeat elements if necessary
x = val2list(x)
if len(x) > 0:
x[idx_repeat:idx_repeat] = [x[idx_repeat] for _ in range(min_len - len(x))]
return tuple(x)
def get_same_padding(kernel_size: int or tuple[int, ...]) -> int or tuple[int, ...]:
if isinstance(kernel_size, tuple):
return tuple([get_same_padding(ks) for ks in kernel_size])
else:
assert kernel_size % 2 > 0, "kernel size should be odd number"
return kernel_size // 2
class ConvNormAct(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
dilation=1,
groups=1,
bias=False,
dropout=0.,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU,
):
super(ConvNormAct, self).__init__()
self.dropout = nn.Dropout(dropout, inplace=False)
self.conv = create_conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=groups,
bias=bias,
)
self.norm = norm_layer(num_features=out_channels) if norm_layer else nn.Identity()
self.act = act_layer(inplace=True) if act_layer else nn.Identity()
def forward(self, x):
x = self.dropout(x)
x = self.conv(x)
x = self.norm(x)
x = self.act(x)
return x
class DSConv(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
use_bias=False,
norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d),
act_layer=(nn.ReLU6, None),
):
super(DSConv, self).__init__()
use_bias = val2tuple(use_bias, 2)
norm_layer = val2tuple(norm_layer, 2)
act_layer = val2tuple(act_layer, 2)
self.depth_conv = ConvNormAct(
in_channels,
in_channels,
kernel_size,
stride,
groups=in_channels,
norm_layer=norm_layer[0],
act_layer=act_layer[0],
bias=use_bias[0],
)
self.point_conv = ConvNormAct(
in_channels,
out_channels,
1,
norm_layer=norm_layer[1],
act_layer=act_layer[1],
bias=use_bias[1],
)
def forward(self, x):
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class MBConv(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
mid_channels=None,
expand_ratio=6,
use_bias=False,
norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d, nn.BatchNorm2d),
act_layer=(nn.ReLU6, nn.ReLU6, None),
):
super(MBConv, self).__init__()
use_bias = val2tuple(use_bias, 3)
norm_layer = val2tuple(norm_layer, 3)
act_layer = val2tuple(act_layer, 3)
mid_channels = mid_channels or round(in_channels * expand_ratio)
self.inverted_conv = ConvNormAct(
in_channels,
mid_channels,
1,
stride=1,
norm_layer=norm_layer[0],
act_layer=act_layer[0],
bias=use_bias[0],
)
self.depth_conv = ConvNormAct(
mid_channels,
mid_channels,
kernel_size,
stride=stride,
groups=mid_channels,
norm_layer=norm_layer[1],
act_layer=act_layer[1],
bias=use_bias[1],
)
self.point_conv = ConvNormAct(
mid_channels,
out_channels,
1,
norm_layer=norm_layer[2],
act_layer=act_layer[2],
bias=use_bias[2],
)
def forward(self, x):
x = self.inverted_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class LiteMSA(nn.Module):
"""Lightweight multi-scale attention"""
def __init__(
self,
in_channels: int,
out_channels: int,
heads: int or None = None,
heads_ratio: float = 1.0,
dim=8,
use_bias=False,
norm_layer=(None, nn.BatchNorm2d),
act_layer=(None, None),
kernel_func=nn.ReLU,
scales=(5,),
eps=1e-5,
):
super(LiteMSA, self).__init__()
self.eps = eps
heads = heads or int(in_channels // dim * heads_ratio)
total_dim = heads * dim
use_bias = val2tuple(use_bias, 2)
norm_layer = val2tuple(norm_layer, 2)
act_layer = val2tuple(act_layer, 2)
self.dim = dim
self.qkv = ConvNormAct(
in_channels,
3 * total_dim,
1,
bias=use_bias[0],
norm_layer=norm_layer[0],
act_layer=act_layer[0],
)
self.aggreg = nn.ModuleList([
nn.Sequential(
nn.Conv2d(
3 * total_dim,
3 * total_dim,
scale,
padding=get_same_padding(scale),
groups=3 * total_dim,
bias=use_bias[0],
),
nn.Conv2d(3 * total_dim, 3 * total_dim, 1, groups=3 * heads, bias=use_bias[0]),
)
for scale in scales
])
self.kernel_func = kernel_func(inplace=False)
self.proj = ConvNormAct(
total_dim * (1 + len(scales)),
out_channels,
1,
bias=use_bias[1],
norm_layer=norm_layer[1],
act_layer=act_layer[1],
)
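    # Linear (softmax-free) attention: computing k^T @ v first costs O(N*d^2) rather than the
    # O(N^2*d) of standard attention. The last channel of `out` carries the normalizer that the
    # ones-padding of v accumulates (see forward) and is divided out below.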
def _attn(self, q, k, v):
dtype = v.dtype
q, k, v = q.float(), k.float(), v.float()
kv = k.transpose(-1, -2) @ v
out = q @ kv
out = out[..., :-1] / (out[..., -1:] + self.eps)
return out.to(dtype)
def forward(self, x):
B, _, H, W = x.shape
# generate multi-scale q, k, v
qkv = self.qkv(x)
multi_scale_qkv = [qkv]
for op in self.aggreg:
multi_scale_qkv.append(op(qkv))
multi_scale_qkv = torch.cat(multi_scale_qkv, dim=1)
multi_scale_qkv = multi_scale_qkv.reshape(B, -1, 3 * self.dim, H * W).transpose(-1, -2)
q, k, v = multi_scale_qkv.chunk(3, dim=-1)
# lightweight global attention
q = self.kernel_func(q)
k = self.kernel_func(k)
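        # Pad v with a constant channel of ones so the same matmul also accumulates the
        # attention normalizer; _attn divides it out of the remaining channels.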
v = F.pad(v, (0, 1), mode="constant", value=1.)
if not torch.jit.is_scripting():
with torch.autocast(device_type=v.device.type, enabled=False):
out = self._attn(q, k, v)
else:
out = self._attn(q, k, v)
# final projection
out = out.transpose(-1, -2).reshape(B, -1, H, W)
out = self.proj(out)
return out
register_notrace_module(LiteMSA)
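# An EfficientViT block pairs a global context module (LiteMSA linear attention) with a local
# MBConv module, each wrapped in a residual connection.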
class EfficientVitBlock(nn.Module):
def __init__(
self,
in_channels,
heads_ratio=1.0,
head_dim=32,
expand_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
):
super(EfficientVitBlock, self).__init__()
self.context_module = ResidualBlock(
LiteMSA(
in_channels=in_channels,
out_channels=in_channels,
heads_ratio=heads_ratio,
dim=head_dim,
norm_layer=(None, norm_layer),
),
nn.Identity(),
)
self.local_module = ResidualBlock(
MBConv(
in_channels=in_channels,
out_channels=in_channels,
expand_ratio=expand_ratio,
use_bias=(True, True, False),
norm_layer=(None, None, norm_layer),
act_layer=(act_layer, act_layer, None),
),
nn.Identity(),
)
def forward(self, x):
x = self.context_module(x)
x = self.local_module(x)
return x
class ResidualBlock(nn.Module):
def __init__(
self,
main: Optional[nn.Module],
shortcut: Optional[nn.Module] = None,
pre_norm: Optional[nn.Module] = None,
):
super(ResidualBlock, self).__init__()
self.pre_norm = pre_norm if pre_norm is not None else nn.Identity()
self.main = main
self.shortcut = shortcut
def forward(self, x):
res = self.main(self.pre_norm(x))
if self.shortcut is not None:
res = res + self.shortcut(x)
return res
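# Picks the local operator: DSConv when expand_ratio == 1, otherwise MBConv. With fewer_norm=True
# (used in the attention stages) normalization is kept only on the final pointwise conv.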
def build_local_block(
in_channels: int,
out_channels: int,
stride: int,
expand_ratio: float,
norm_layer: str,
act_layer: str,
fewer_norm: bool = False,
):
if expand_ratio == 1:
block = DSConv(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
use_bias=(True, False) if fewer_norm else False,
norm_layer=(None, norm_layer) if fewer_norm else norm_layer,
act_layer=(act_layer, None),
)
else:
block = MBConv(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
expand_ratio=expand_ratio,
use_bias=(True, True, False) if fewer_norm else False,
norm_layer=(None, None, norm_layer) if fewer_norm else norm_layer,
act_layer=(act_layer, act_layer, None),
)
return block
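# Stem: a stride-2 convolution followed by `depth` residual depthwise-separable (DSConv) blocks,
# giving an overall stem stride of 2.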
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, depth, norm_layer, act_layer):
super().__init__()
self.stride = 2
self.add_module(
'in_conv',
ConvNormAct(
in_chs, out_chs,
kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer,
)
)
stem_block = 0
for _ in range(depth):
self.add_module(f'res{stem_block}', ResidualBlock(
build_local_block(
in_channels=out_chs,
out_channels=out_chs,
stride=1,
expand_ratio=1,
norm_layer=norm_layer,
act_layer=act_layer,
),
nn.Identity(),
))
stem_block += 1
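# Each stage starts with a stride-2 downsampling block; the remaining blocks are EfficientVitBlocks
# when vit_stage is True (the later stages) and plain residual local blocks otherwise.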
class EfficientVitStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth,
norm_layer,
act_layer,
expand_ratio,
head_dim,
vit_stage=False,
):
super(EfficientVitStage, self).__init__()
blocks = [ResidualBlock(
build_local_block(
in_channels=in_chs,
out_channels=out_chs,
stride=2,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer,
fewer_norm=vit_stage,
),
None,
)]
in_chs = out_chs
if vit_stage:
# for stage 3, 4
for _ in range(depth):
blocks.append(
EfficientVitBlock(
in_channels=in_chs,
head_dim=head_dim,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer,
)
)
else:
# for stage 1, 2
for i in range(1, depth):
blocks.append(ResidualBlock(
build_local_block(
in_channels=in_chs,
out_channels=out_chs,
stride=1,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer
),
nn.Identity(),
))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
return self.blocks(x)
class ClassifierHead(nn.Module):
def __init__(
self,
in_channels,
widths,
n_classes=1000,
dropout=0.,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
global_pool='avg',
):
super(ClassifierHead, self).__init__()
self.in_conv = ConvNormAct(in_channels, widths[0], 1, norm_layer=norm_layer, act_layer=act_layer)
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True, input_fmt='NCHW')
self.classifier = nn.Sequential(
nn.Linear(widths[0], widths[1], bias=False),
nn.LayerNorm(widths[1]),
act_layer(inplace=True),
nn.Dropout(dropout, inplace=False),
nn.Linear(widths[1], n_classes, bias=True),
)
def forward(self, x, pre_logits: bool = False):
x = self.in_conv(x)
x = self.global_pool(x)
if pre_logits:
return x
x = self.classifier(x)
return x
class EfficientVit(nn.Module):
def __init__(
self,
in_chans=3,
widths=(),
depths=(),
head_dim=32,
expand_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
global_pool='avg',
head_widths=(),
drop_rate=0.0,
num_classes=1000,
):
super(EfficientVit, self).__init__()
self.grad_checkpointing = False
self.global_pool = global_pool
self.num_classes = num_classes
# input stem
self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer)
stride = self.stem.stride
# stages
self.feature_info = []
self.stages = nn.Sequential()
in_channels = widths[0]
for i, (w, d) in enumerate(zip(widths[1:], depths[1:])):
self.stages.append(EfficientVitStage(
in_channels,
w,
depth=d,
norm_layer=norm_layer,
act_layer=act_layer,
expand_ratio=expand_ratio,
head_dim=head_dim,
vit_stage=i >= 2,
))
stride *= 2
in_channels = w
self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')]
self.num_features = in_channels
self.head_widths = head_widths
self.head_dropout = drop_rate
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
self.head = nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.classifier[-1]
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True)
else:
self.head = nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting(): | x = checkpoint_seq(self.stages, x) | 2 | 2023-11-05 01:25:14+00:00 | 8k |
AdFiFi/D-FaST | utils/trainer.py | [
{
"identifier": "init_model_config",
"path": "config.py",
"snippet": "def init_model_config(args, data_config: DataConfig):\r\n if args.model == \"BNT\":\r\n model_config = BNTConfig(node_size=data_config.node_size,\r\n sizes=(data_config.node_size, data_config.node_size // 2),\r\n num_classes=data_config.num_class,\r\n pooling=(False, True),\r\n pos_encoding=None, # identity, none\r\n orthogonal=True,\r\n # freeze_center=True,\r\n freeze_center=False,\r\n project_assignment=True,\r\n num_heads=args.num_heads,\r\n pos_embed_dim=data_config.node_size,\r\n dim_feedforward=1024,\r\n )\r\n model = BNT(model_config)\r\n elif args.model == \"FBNetGen\":\r\n model_config = FBNetGenConfig(activation='gelu',\r\n dropout=0.5,\r\n # extractor_type='gru', # gru or cnn\r\n extractor_type='cnn', # gru or cnn\r\n # d_model=16,\r\n d_model=40,\r\n node_size=data_config.node_size,\r\n node_feature_size=data_config.node_feature_size,\r\n time_series_size=data_config.time_series_size,\r\n num_classes=data_config.num_class,\r\n window_size=5,\r\n # window_size=40,\r\n # window_size=50,\r\n cnn_pool_size=16,\r\n graph_generation='product', # product or linear\r\n num_gru_layers=4,\r\n group_loss=True,\r\n sparsity_loss=True,\r\n sparsity_loss_weight=1.0e-4)\r\n model = FBNetGen(model_config)\r\n elif args.model == 'BrainNetCNN':\r\n model_config = BrainNetCNNConfig(node_size=data_config.node_size,\r\n num_classes=data_config.num_class)\r\n model = BrainNetCNN(model_config)\r\n elif args.model == 'STAGIN':\r\n model_config = STAGINConfig(node_size=data_config.node_size,\r\n num_classes=data_config.num_class,\r\n d_model=args.d_model,\r\n num_layers=args.num_layers,\r\n window_size=args.window_size,\r\n window_stride=args.window_stride,\r\n dynamic_length=args.dynamic_length,\r\n sampling_init=args.sampling_init)\r\n model = STAGIN(model_config)\r\n elif args.model == \"Transformer\":\r\n model_config = TransformerConfig(node_size=data_config.node_size,\r\n num_classes=data_config.num_class,\r\n node_feature_size=data_config.node_feature_size,\r\n readout='concat',\r\n num_layers=args.num_layers)\r\n model = Transformer(model_config)\r\n elif args.model == \"EEGNet\":\r\n model_config = EEGNetConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class,\r\n frequency=args.frequency,\r\n D=args.D,\r\n num_kernels=args.num_kernels,\r\n p1=args.p1,\r\n p2=args.p2,\r\n dropout=args.dropout)\r\n model_config.class_weight = data_config.class_weight\r\n model = EEGNet(model_config)\r\n elif args.model == \"DFaST\":\r\n model_config = DFaSTConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class,\r\n sparsity=args.sparsity,\r\n frequency=args.frequency,\r\n D=args.D,\r\n p1=args.p1,\r\n p2=args.p2,\r\n k=args.k,\r\n num_kernels=args.num_kernels,\r\n d_model=args.d_model,\r\n window_size=args.window_size,\r\n window_stride=args.window_stride,\r\n dynamic_length=args.dynamic_length,\r\n num_heads=args.num_heads,\r\n dim_feedforward=args.dim_feedforward,\r\n num_spatial_layers=args.num_layers,\r\n num_node_temporal_layers=args.num_node_temporal_layers,\r\n num_graph_temporal_layers=args.num_graph_temporal_layers,\r\n attention_depth=args.attention_depth,\r\n activation=args.activation,\r\n dropout=args.dropout,\r\n # distill=(False, ) + (args.num_layers - 1) *\r\n # ((True,) if args.distill else (False,)),\r\n distill=args.num_layers * 
((True,) if args.distill else (False,)),\r\n initializer=args.initializer,\r\n label_smoothing=args.epsilon_ls\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = DFaSTForClassification(model_config)\r\n elif args.model == \"DFaSTOnlySpatial\":\r\n model_config = DFaSTConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class,\r\n sparsity=args.sparsity,\r\n frequency=args.frequency,\r\n D=args.D,\r\n p1=args.p1,\r\n p2=args.p2,\r\n k=args.k,\r\n num_kernels=args.num_kernels,\r\n d_model=args.d_model,\r\n window_size=args.window_size,\r\n window_stride=args.window_stride,\r\n dynamic_length=args.dynamic_length,\r\n num_heads=args.num_heads,\r\n dim_feedforward=args.dim_feedforward,\r\n num_spatial_layers=args.num_layers,\r\n num_node_temporal_layers=args.num_node_temporal_layers,\r\n num_graph_temporal_layers=args.num_graph_temporal_layers,\r\n attention_depth=args.attention_depth,\r\n activation=args.activation,\r\n dropout=args.dropout,\r\n # distill=(False, ) + (args.num_layers - 1) *\r\n # ((True,) if args.distill else (False,)),\r\n distill=args.num_layers * ((True,) if args.distill else (False,)),\r\n initializer=args.initializer,\r\n label_smoothing=args.epsilon_ls\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = DFaSTOnlySpatialForClassification(model_config)\r\n elif args.model == \"LMDA\":\r\n model_config = LMDAConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class,\r\n depth=9,\r\n channel_depth1=args.num_kernels,\r\n channel_depth2=9,\r\n ave_depth=1,\r\n avepool=5\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = LMDA(model_config)\r\n elif args.model == \"ShallowConvNet\":\r\n model_config = ShallowConvNetConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class,\r\n num_kernels=args.num_kernels\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = ShallowConvNet(model_config)\r\n elif args.model == \"DeepConvNet\":\r\n model_config = DeepConvNetConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class,\r\n num_kernels=25\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = DeepConvNet(model_config)\r\n elif args.model == \"RACNN\":\r\n model_config = RACNNConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class,\r\n k=args.k\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = RACNN(model_config)\r\n elif args.model == \"EEGChannelNet\":\r\n model_config = EEGChannelNetConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = EEGChannelNet(model_config)\r\n elif args.model == \"TCANet\":\r\n model_config = TCANetConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n 
node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = TCANet(model_config)\r\n elif args.model == \"TCACNet\":\r\n model_config = TCACNetConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = TCACNet(model_config)\r\n elif args.model == \"SBLEST\":\r\n model_config = SBLESTConfig(node_size=data_config.node_size,\r\n time_series_size=data_config.time_series_size,\r\n node_feature_size=data_config.node_feature_size,\r\n num_classes=data_config.num_class\r\n )\r\n model_config.class_weight = data_config.class_weight\r\n model = SBLEST(model_config)\r\n else:\r\n model = None\r\n model_config = None\r\n if model is not None:\r\n init_parameters(model, model_config)\r\n return model, model_config\r"
},
{
"identifier": "init_optimizer",
"path": "utils/optimizer.py",
"snippet": "def init_optimizer(model: torch.nn.Module, optimizer_config=None) -> torch.optim.Optimizer:\r\n parameters = {\r\n 'lr': optimizer_config.learning_rate,\r\n 'weight_decay': optimizer_config.weight_decay\r\n }\r\n\r\n if optimizer_config.no_weight_decay:\r\n params, _ = get_param_group_no_wd(model,\r\n match_rule=optimizer_config.match_rule,\r\n except_rule=optimizer_config.except_rule)\r\n else:\r\n params = list(model.parameters())\r\n logging.info(f'Parameters [normal] length [{len(params)}]')\r\n\r\n parameters['params'] = params\r\n\r\n optimizer_type = optimizer_config.optimizer\r\n if optimizer_type == 'SGD':\r\n parameters['momentum'] = optimizer_config.momentum\r\n parameters['nesterov'] = optimizer_config.nesterov\r\n return getattr(torch.optim, optimizer_type)(**parameters)\r"
},
{
"identifier": "init_schedule",
"path": "utils/schedule.py",
"snippet": "def init_schedule(optimizer, args, t_total):\r\n if args.schedule == 'cos':\r\n schedule = CosineAnnealingLR(optimizer, eta_min=args.target_learning_rate, T_max=t_total)\r\n elif args.schedule == 'cos_w':\r\n schedule = get_cosine_annealing_schedule_with_warmup(optimizer, eta_max=args.learning_rate,\r\n eta_min=args.target_learning_rate,\r\n num_warmup_steps=args.warmup_steps,\r\n num_training_steps=t_total)\r\n elif args.schedule == 'linear':\r\n schedule = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\r\n num_training_steps=t_total)\r\n elif args.schedule == 'one_cycle':\r\n schedule = OneCycleLR(optimizer,\r\n max_lr=args.max_learning_rate,\r\n epochs=args.num_epochs,\r\n steps_per_epoch=t_total // args.num_epochs,\r\n pct_start=0.2,\r\n div_factor=args.max_learning_rate/args.learning_rate,\r\n final_div_factor=1000)\r\n else:\r\n schedule = None\r\n return schedule\r"
},
{
"identifier": "accuracy",
"path": "utils/accuracy.py",
"snippet": "def accuracy(output: torch.Tensor, target: torch.Tensor, top_k=(1,)) -> List[float]:\r\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\r\n max_k = max(top_k)\r\n batch_size = target.size(0)\r\n\r\n _, predict = output.topk(max_k, 1, True, True)\r\n predict = predict.t()\r\n correct = predict.eq(target.view(1, -1).expand_as(predict))\r\n\r\n res = []\r\n for k in top_k:\r\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\r\n res.append(correct_k.mul_(100.0 / batch_size).item())\r\n return res\r"
}
] | import json
import os
import wandb
import logging
import torch
import numpy as np
from timeit import default_timer as timer
from abc import abstractmethod
from torch.nn import functional as F
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.metrics import precision_recall_fscore_support, classification_report
from config import init_model_config
from .optimizer import init_optimizer
from .schedule import init_schedule
from .accuracy import accuracy
from data import *
| 4,034 | if args.device != 'cpu' and torch.cuda.is_available() else args.device
self.model = model.to(args.device)
# self.model = torch.compile(model, dynamic=True)
self.optimizer = None
self.scheduler = None
self.best_result = None
self.test_result = None
@abstractmethod
def prepare_inputs_kwargs(self, inputs):
return {}
def load_datasets(self):
# datasets = eval(
# f"load_{self.args.dataset}_data")(self.data_config)
datasets = eval(
f"{self.args.dataset}Dataset")(self.data_config, k=self.task_id, subject_id=self.subject_id)
if self.args.do_parallel:
data_loaders = init_distributed_dataloader(self.data_config, datasets)
else:
data_loaders = init_StratifiedKFold_dataloader(self.data_config, datasets)
return data_loaders
def init_components(self):
total = self.args.num_epochs * len(self.data_loaders['train'])
self.optimizer = init_optimizer(self.model, self.args)
self.scheduler = init_schedule(self.optimizer, self.args, total)
def train_epoch(self):
train_dataloader = self.data_loaders['train']
self.model.train()
losses = 0
loss_list = []
for step, inputs in enumerate(train_dataloader):
# with torch.autograd.set_detect_anomaly(True):
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
if self.data_config.dataset == "ZuCo":
loss.backward()
if step % self.data_config.batch_size == self.data_config.batch_size - 1:
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
self.optimizer.zero_grad()
else:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
losses += loss.item()
loss_list.append(loss.item())
wandb.log({'Training loss': loss.item(),
'Learning rate': self.optimizer.param_groups[0]['lr']})
return losses / len(loss_list)
def train(self):
total = self.args.num_epochs*len(self.data_loaders['train'])
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(self.data_loaders['train']))
logger.info(" Num Epochs = %d", self.args.num_epochs)
logger.info(" Total train batch size = %d", self.args.batch_size)
logger.info(" warmup steps = %d", self.args.warmup_steps)
logger.info(" Total optimization steps = %d", total)
logger.info(" Save steps = %d", self.args.save_steps)
self.init_components()
if self.args.visualize:
self.visualize()
for epoch in tqdm(range(1, self.args.num_epochs + 1), desc="epoch", ncols=0):
start_time = timer()
train_loss = self.train_epoch()
end_time = timer()
self.data_config.alpha = self.data_config.beta = \
0.5 * (self.args.num_epochs - epoch) / self.args.num_epochs + 0.5
self.test_result = self.evaluate()
msg = f" Train loss: {train_loss:.5f}, Test loss: {self.test_result['Loss']:.5f}," \
f"Epoch time = {(end_time - start_time):.3f}s"
print(msg)
logger.info(msg)
if self.best_result is None or self.best_result['Accuracy'] <= self.test_result['Accuracy']:
self.best_result = self.test_result
self.save_model()
wandb.log({f"Best {k}": v for k, v in self.best_result.items()})
def evaluate(self):
if self.data_config.num_class == 2:
result = self.binary_evaluate()
else:
result = self.multiple_evaluate()
return result
def binary_evaluate(self):
logger.info(f"***** Running evaluation on test{self.task_id} dataset *****")
self.model.eval()
evaluate_dataloader = self.data_loaders['test']
losses = 0
loss_list = []
labels = []
result = {}
preds = []
acc = []
with torch.no_grad():
for inputs in evaluate_dataloader:
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
losses += loss.item()
loss_list.append(loss.item())
# print(f"Evaluate loss: {loss.item():.5f}")
|
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Trainer(object):
def __init__(self, args, local_rank=0, task_id=0, subject_id=0):
self.task_id = task_id
self.args = args
self.local_rank = local_rank
self.subject_id = subject_id
self.data_config = DataConfig(args)
self.data_loaders = self.load_datasets()
model, self.model_config = init_model_config(args, self.data_config)
if args.do_parallel:
# self.model = torch.nn.DataParallel(self.model)
self.device = f'cuda:{self.local_rank}' \
if args.device != 'cpu' and torch.cuda.is_available() else args.device
self.model = model.to(args.device)
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.local_rank],
find_unused_parameters=True)
else:
self.device = f'cuda' \
if args.device != 'cpu' and torch.cuda.is_available() else args.device
self.model = model.to(args.device)
# self.model = torch.compile(model, dynamic=True)
self.optimizer = None
self.scheduler = None
self.best_result = None
self.test_result = None
@abstractmethod
def prepare_inputs_kwargs(self, inputs):
return {}
def load_datasets(self):
# datasets = eval(
# f"load_{self.args.dataset}_data")(self.data_config)
datasets = eval(
f"{self.args.dataset}Dataset")(self.data_config, k=self.task_id, subject_id=self.subject_id)
if self.args.do_parallel:
data_loaders = init_distributed_dataloader(self.data_config, datasets)
else:
data_loaders = init_StratifiedKFold_dataloader(self.data_config, datasets)
return data_loaders
def init_components(self):
total = self.args.num_epochs * len(self.data_loaders['train'])
self.optimizer = init_optimizer(self.model, self.args)
self.scheduler = init_schedule(self.optimizer, self.args, total)
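	# For the ZuCo dataset, train_epoch accumulates gradients over `batch_size` steps before each
	# optimizer/scheduler update; for all other datasets it updates once per mini-batch.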
def train_epoch(self):
train_dataloader = self.data_loaders['train']
self.model.train()
losses = 0
loss_list = []
for step, inputs in enumerate(train_dataloader):
# with torch.autograd.set_detect_anomaly(True):
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
if self.data_config.dataset == "ZuCo":
loss.backward()
if step % self.data_config.batch_size == self.data_config.batch_size - 1:
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
self.optimizer.zero_grad()
else:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
losses += loss.item()
loss_list.append(loss.item())
wandb.log({'Training loss': loss.item(),
'Learning rate': self.optimizer.param_groups[0]['lr']})
return losses / len(loss_list)
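	# train() evaluates after every epoch, anneals data_config.alpha/beta from ~1.0 down to 0.5,
	# and saves a checkpoint whenever test accuracy ties or beats the best seen so far.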
def train(self):
total = self.args.num_epochs*len(self.data_loaders['train'])
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(self.data_loaders['train']))
logger.info(" Num Epochs = %d", self.args.num_epochs)
logger.info(" Total train batch size = %d", self.args.batch_size)
logger.info(" warmup steps = %d", self.args.warmup_steps)
logger.info(" Total optimization steps = %d", total)
logger.info(" Save steps = %d", self.args.save_steps)
self.init_components()
if self.args.visualize:
self.visualize()
for epoch in tqdm(range(1, self.args.num_epochs + 1), desc="epoch", ncols=0):
start_time = timer()
train_loss = self.train_epoch()
end_time = timer()
self.data_config.alpha = self.data_config.beta = \
0.5 * (self.args.num_epochs - epoch) / self.args.num_epochs + 0.5
self.test_result = self.evaluate()
msg = f" Train loss: {train_loss:.5f}, Test loss: {self.test_result['Loss']:.5f}," \
f"Epoch time = {(end_time - start_time):.3f}s"
print(msg)
logger.info(msg)
if self.best_result is None or self.best_result['Accuracy'] <= self.test_result['Accuracy']:
self.best_result = self.test_result
self.save_model()
wandb.log({f"Best {k}": v for k, v in self.best_result.items()})
def evaluate(self):
if self.data_config.num_class == 2:
result = self.binary_evaluate()
else:
result = self.multiple_evaluate()
return result
def binary_evaluate(self):
logger.info(f"***** Running evaluation on test{self.task_id} dataset *****")
self.model.eval()
evaluate_dataloader = self.data_loaders['test']
losses = 0
loss_list = []
labels = []
result = {}
preds = []
acc = []
with torch.no_grad():
for inputs in evaluate_dataloader:
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
losses += loss.item()
loss_list.append(loss.item())
# print(f"Evaluate loss: {loss.item():.5f}")
| top1 = accuracy(outputs.logits, input_kwargs['labels'][:, 1])[0]
| 3 | 2023-11-07 13:57:36+00:00 | 8k |
YihePang/DisoFLAG | model_running.py | [
{
"identifier": "Args_config",
"path": "args.py",
"snippet": "class Args_config: \n\tdef __init__(self):\n\t\tself.use_gpu = True\n\t\tself.max_seq_length = 128\n\n\t\tself.feature_dim = 1024\n\t\tself.encoder_hidden = 512\n\n\t\tself.decoder_hidden = 1024\n\n\t\tself.decoder_dropout = 0.3\n\n\t\tself.model_path = './saved_model'\n\t\tself.epochs = 50\n\t\tself.batch_size = 16\n\t\tself.learning_rate = 0.00005"
},
{
"identifier": "data_2_samples",
"path": "prepare_model_data.py",
"snippet": "def data_2_samples(args, data_file_name, is_slice):\n\n\tseq_id,seq,seq_label_IDP,seq_label_F1,seq_label_F2,seq_label_F3,seq_label_F4,seq_label_F5,seq_label_F6,seq_T5_feature = file_2_data(data_file_name)\n\n\t# 标签处理\n\tres_mask_0 = residue_mask(seq_label_IDP) \n\tres_mask_1 = residue_mask(seq_label_F1) \n\tres_mask_2 = residue_mask(seq_label_F2) \n\tres_mask_3 = residue_mask(seq_label_F3) \n\tres_mask_4 = residue_mask(seq_label_F4) \n\tres_mask_5 = residue_mask(seq_label_F5) \n\tres_mask_6 = residue_mask(seq_label_F6) \n\n\tseq_mask = sequence_mask(seq) # \n\n\tseq_label_0 = lable_2_value(seq_label_IDP) \n\tseq_label_1 = lable_2_value(seq_label_F1) \n\tseq_label_2 = lable_2_value(seq_label_F2) \n\tseq_label_3 = lable_2_value(seq_label_F3) \n\tseq_label_4 = lable_2_value(seq_label_F4) \n\tseq_label_5 = lable_2_value(seq_label_F5) \n\tseq_label_6 = lable_2_value(seq_label_F6) \n\n\n\t \n\tif is_slice == True:\n\t\tseq_id,seq,seq_label_0,seq_label_1,seq_label_2,seq_label_3,seq_label_4,seq_label_5,seq_label_6,seq_T5_feature,res_mask_0,res_mask_1,res_mask_2,res_mask_3,res_mask_4,res_mask_5,res_mask_6,seq_mask = slice_data(seq_id,seq,seq_label_0,seq_label_1,seq_label_2,seq_label_3,seq_label_4,seq_label_5,seq_label_6,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tseq_T5_feature,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tres_mask_0,res_mask_1,res_mask_2,res_mask_3,res_mask_4,res_mask_5,res_mask_6,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tseq_mask,args.max_seq_length)\n\t\t# print(\"after slice lengths: \",len(seq_id))\n\n\t# padding\n\tpad_seq_label_0 = seq_lable_padding(seq_label_0, args.max_seq_length)\n\n\tpad_seq_label_1 = seq_lable_padding(seq_label_1, args.max_seq_length)\n\tpad_seq_label_2 = seq_lable_padding(seq_label_2, args.max_seq_length)\n\tpad_seq_label_3 = seq_lable_padding(seq_label_3, args.max_seq_length)\n\n\tpad_seq_label_4 = seq_lable_padding(seq_label_4, args.max_seq_length)\n\tpad_seq_label_5 = seq_lable_padding(seq_label_5, args.max_seq_length)\n\tpad_seq_label_6 = seq_lable_padding(seq_label_6, args.max_seq_length)\n\n\tpad_seq_T5_feature = seq_feature_padding(seq_T5_feature, args.max_seq_length)\n\tpad_res_mask_0 = mask_padding(res_mask_0,args.max_seq_length)\n\n\tpad_res_mask_1 = mask_padding(res_mask_1,args.max_seq_length)\n\tpad_res_mask_2 = mask_padding(res_mask_2,args.max_seq_length)\n\tpad_res_mask_3 = mask_padding(res_mask_3,args.max_seq_length)\n\n\tpad_res_mask_4 = mask_padding(res_mask_4,args.max_seq_length)\n\tpad_res_mask_5 = mask_padding(res_mask_5,args.max_seq_length)\n\tpad_res_mask_6 = mask_padding(res_mask_6,args.max_seq_length)\n\n\tpad_seq_mask = mask_padding(seq_mask,args.max_seq_length)\n\n\n\tdata_samples = []\n\tfor i in range(len(seq_id)):\n\t\tone_sample = [] \n\n\t\tone_sample.append(seq_id[i]) \n\t\tone_sample.append(seq[i]) \n\t\t\n\t\t# label\n\t\tone_sample.append(pad_seq_label_0[i]) # (padding)-----------------------2 IDP\n\t\tone_sample.append(pad_seq_label_1[i]) # (padding)-----------------------3 PB\n\t\tone_sample.append(pad_seq_label_2[i]) # (padding)-----------------------4 DB\n\t\tone_sample.append(pad_seq_label_3[i]) # (padding)-----------------------5 RB\n\t\tone_sample.append(pad_seq_label_4[i]) # (padding)-----------------------6 IB\n\t\tone_sample.append(pad_seq_label_5[i]) # (padding)-----------------------7 LB\n\t\tone_sample.append(pad_seq_label_6[i]) # (padding)-----------------------8 Link\n\n\t\t# length\n\t\tone_sample.append(len(seq[i])) # 
-----------------------------9\n\n\t\t\n\t\tone_sample.append(pad_seq_T5_feature[i]) # (padding)--------------------10\n\t\t# one_sample.append(pad_seq_BERT_feature[i]) # (padding)------------------11\n\t\t# one_sample.append(pad_seq_IDP_feature[i]) # (padding)-------------------12\n\n\t \n\t\tone_sample.append(seq_label_0[i]) # ---------13\n\t\tone_sample.append(seq_label_1[i]) # ---------14\n\t\tone_sample.append(seq_label_2[i]) # ---------15\n\t\tone_sample.append(seq_label_3[i]) # ---------16\n\t\tone_sample.append(seq_label_4[i]) # ---------17\n\t\tone_sample.append(seq_label_5[i]) # ---------18\n\t\tone_sample.append(seq_label_6[i]) # ---------19\n\n\t\t# mask\n\t\tone_sample.append(pad_res_mask_0[i]) #0,1 mask----------------------20\n\t\tone_sample.append(pad_res_mask_1[i]) #0,1 mask----------------------21\n\t\tone_sample.append(pad_res_mask_2[i]) #0,1 mask----------------------22\n\t\tone_sample.append(pad_res_mask_3[i]) #0,1 mask----------------------23\n\t\tone_sample.append(pad_res_mask_4[i]) #0,1 mask----------------------24\n\t\tone_sample.append(pad_res_mask_5[i]) #0,1 mask----------------------25\n\t\tone_sample.append(pad_res_mask_6[i]) #0,1 mask----------------------26\n \n\t\tone_sample.append(pad_seq_mask[i]) #seq -----------------------27\n\n\t\tdata_samples.append(one_sample)\n\n\n\treturn data_samples"
},
{
"identifier": "Batches_data",
"path": "prepare_model_data.py",
"snippet": "def Batches_data(data_samples, batch_size, is_train): #all data samples \n\t# if is_train == True:\n\t# \trandom.shuffle(data_samples)\n\tbatches = []\n\tdata_len = len(data_samples)\n\tbatch_nums = int(data_len/batch_size) \n\tdef genNextSamples():\n\t\tfor i in range(0, batch_nums*batch_size, batch_size):\n\t\t\tyield data_samples[i: i + batch_size]\n\t\tif data_len % batch_size != 0: \n\t\t\tlast_num = data_len - batch_nums*batch_size\n\t\t\tup_num = batch_size - last_num\n\t\t\tl1 = data_samples[batch_nums*batch_size : data_len]\n\t\t\tl2 = data_samples[0: up_num]\n\t\t\tyield l1+l2\n\t\n\tfor one_data_samples in genNextSamples():\n\t\tone_batch = one_batch_data(one_data_samples)\n\t\tbatches.append(one_batch)\t\n\treturn batches "
},
{
"identifier": "load_file_2_data",
"path": "load_data.py",
"snippet": "def load_file_2_data(file_path):\n\tloadfile = open(file_path,\"r\") \t\n\tload_f = []\n\tfor line in loadfile:\n\t\tline=line.strip('\\n')\n\t\tload_f.append(line)\n\tloadfile.close()\n\n\tload_data = []\n\tfor i in range(len(load_f)):\n\t\tif i % 2 == 0:\n\t\t\tload_data.append(load_f[i:i+2]) #one data: [0]--id [1]--seq \n\t# print(\"load_file: \",file_path,\" data length: \",len(load_data)) \n\treturn load_data"
},
{
"identifier": "Seq2FUN",
"path": "model.py",
"snippet": "class Seq2FUN(nn.Module):\n\tdef __init__(self,args):\t\t\t\n\t\tsuper().__init__()\n\t\tself.model_name = 'Model'\n\t\tself.args = args\n\t\tself.encoder = Encoder(self.args)\n\t\tself.decoder = Decoder(self.args)\n\t\tself.Graph_decoder = Graph_decoder(self.args)\n\n\t\t\n\t\tself.IDR_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)\n\n\t\tself.PB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)\n\t\tself.DB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)\n\t\tself.RB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)\n\t\tself.IB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)\n\t\tself.LB_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)\n\t\tself.Link_trans = nn.Linear(self.args.decoder_hidden, self.args.decoder_hidden)\n\n\t\t\n\t\tself.IDP_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)\n\n\t\tself.PB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)\n\t\tself.DB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)\n\t\tself.RB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)\n\t\tself.IB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)\n\t\tself.LB_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)\n\t\tself.Link_cal_prob = nn.Linear(in_features=128, out_features=1, bias =True)\n\n\n\t\tself.activate = nn.Sigmoid()\n\n\n\tdef forward(self, input_feature):\n\t\t# print(\"input_feature:\",input_feature.shape) # [B, L, 1024]\n\t\t# Bi-GRU Encoder\n\t\tencoder_outputs, encoder_hiddens = self.encoder(input_feature)\n\n\t\t# Decoder_attention\n\t\tdecoder_outputs = self.decoder(encoder_outputs, encoder_hiddens) # [B, L, 1024]\n\n\t\t\n\t\t# IDR feature\n\t\tIDR_vec = self.IDR_trans(decoder_outputs) # [B, L, 1024]\n\n\t\tPB_vec = self.PB_trans(decoder_outputs)\n\t\tDB_vec = self.DB_trans(decoder_outputs)\n\t\tRB_vec = self.RB_trans(decoder_outputs)\n\t\tIB_vec = self.IB_trans(decoder_outputs)\n\t\tLB_vec = self.LB_trans(decoder_outputs)\n\t\tLink_vec = self.Link_trans(decoder_outputs)\n\n\t\t# Gragh decoder\n\t\tIDR_F_vec, PB_F_vec, DB_F_vec, RB_F_vec, IB_F_vec, LB_F_vec, Link_F_vec, Graph_Ws, Graph_bs, Graph_adjs = self.Graph_decoder(IDR_vec, PB_vec, DB_vec, RB_vec, IB_vec, LB_vec, Link_vec)\n\n\t\t# cal_probs\n\t\tIDR_probs = t.squeeze(self.activate(self.IDP_cal_prob(IDR_F_vec))) # [B, L]\n\n\t\tPB_probs = t.squeeze(self.activate(self.PB_cal_prob(PB_F_vec))) # [B, L]\n\t\tDB_probs = t.squeeze(self.activate(self.DB_cal_prob(DB_F_vec))) # [B, L]\n\t\tRB_probs = t.squeeze(self.activate(self.RB_cal_prob(RB_F_vec))) # [B, L]\n\t\tIB_probs = t.squeeze(self.activate(self.IB_cal_prob(IB_F_vec))) # [B, L]\n\t\tLB_probs = t.squeeze(self.activate(self.LB_cal_prob(LB_F_vec))) # [B, L]\n\t\tLink_probs = t.squeeze(self.activate(self.Link_cal_prob(Link_F_vec))) # [B, L]\n\n\t\treturn IDR_probs, PB_probs, DB_probs, RB_probs, IB_probs, LB_probs, Link_probs"
},
{
"identifier": "write_2_file",
"path": "evaluator.py",
"snippet": "def write_2_file(data_file, data_samples, data_batches, IDR_probs, PB_probs, DB_probs, RB_probs, IB_probs, LB_probs, Link_probs, file_name,output_type):\n\tbatch_size = np.array(data_batches[0].seq_label_0).shape[0]\n\tmax_length = np.array(data_batches[0].seq_label_0).shape[1]\n\tslice_length = len(data_samples)\n\n\n\tpred_logs_0 = []\n\tpred_logs_1 = []\n\tpred_logs_2 = []\n\tpred_logs_3 = []\n\tpred_logs_4 = []\n\tpred_logs_5 = []\n\tpred_logs_6 = []\n\tfor b in range(len(IDR_probs)):\n\t\t# IDR\n\t\tpred_logs_0 += list(IDR_probs[b])\n\t\t# PB\n\t\tpred_logs_1 += list(PB_probs[b])\n\t\t# DB\n\t\tpred_logs_2 += list(DB_probs[b])\n\t\t# RB\n\t\tpred_logs_3 += list(RB_probs[b])\n\t\t# IB\n\t\tpred_logs_4 += list(IB_probs[b])\n\t\t# LB\n\t\tpred_logs_5 += list(LB_probs[b])\n\t\t# Link\n\t\tpred_logs_6 += list(Link_probs[b])\n\tpred_logs_0 = pred_logs_0[:slice_length]\n\tpred_logs_1 = pred_logs_1[:slice_length]\n\tpred_logs_2 = pred_logs_2[:slice_length]\n\tpred_logs_3 = pred_logs_3[:slice_length]\n\tpred_logs_4 = pred_logs_4[:slice_length]\n\tpred_logs_5 = pred_logs_5[:slice_length]\n\tpred_logs_6 = pred_logs_6[:slice_length]\n\n\n\tpred_seq_ids = [] \n\tfor d in range(len(data_batches)):\n\t\tbatch_data = data_batches[d] \n\t\tfor i in range(len(batch_data.seq_id)): #[batch_size]\n\t\t\tpred_seq_ids.append(str(batch_data.seq_id[i]).replace('\\r','')) # pred_seq_ids\n\tpred_seq_ids = pred_seq_ids[:slice_length]\n\torg_ids = list(set(pred_seq_ids))\n\n\n\n\torg_seq_pred_0 = []\n\torg_seq_pred_1 = []\n\torg_seq_pred_2 = []\n\torg_seq_pred_3 = []\n\torg_seq_pred_4 = []\n\torg_seq_pred_5 = []\n\torg_seq_pred_6 = []\n\tfor i in range(len(org_ids)):\n\t\tfind_id = org_ids[i]\n\n\t\tone_pred_0 = []\n\t\tone_pred_1 = []\n\t\tone_pred_2 = []\n\t\tone_pred_3 = []\n\t\tone_pred_4 = []\n\t\tone_pred_5 = []\n\t\tone_pred_6 = []\n\t\tfor j in range(len(pred_seq_ids)):\n\t\t\tif pred_seq_ids[j] == find_id:\n\t\t\t\tone_pred_0 += list(pred_logs_0[j])\n\t\t\t\tone_pred_1 += list(pred_logs_1[j])\n\t\t\t\tone_pred_2 += list(pred_logs_2[j])\n\t\t\t\tone_pred_3 += list(pred_logs_3[j])\n\t\t\t\tone_pred_4 += list(pred_logs_4[j])\n\t\t\t\tone_pred_5 += list(pred_logs_5[j])\n\t\t\t\tone_pred_6 += list(pred_logs_6[j])\n\t\torg_seq_pred_0.append([find_id,one_pred_0])\n\t\torg_seq_pred_1.append([find_id,one_pred_1])\n\t\torg_seq_pred_2.append([find_id,one_pred_2])\n\t\torg_seq_pred_3.append([find_id,one_pred_3])\n\t\torg_seq_pred_4.append([find_id,one_pred_4])\n\t\torg_seq_pred_5.append([find_id,one_pred_5])\n\t\torg_seq_pred_6.append([find_id,one_pred_6])\n\n\n\tpred_final_ordered_0 = []\n\tpred_final_ordered_1 = []\n\tpred_final_ordered_2 = []\n\tpred_final_ordered_3 = []\n\tpred_final_ordered_4 = []\n\tpred_final_ordered_5 = []\n\tpred_final_ordered_6 = []\n\tfor i in range(len(data_file)): \n\t\tfind_id = str(str(data_file[i][0]).replace('>','')).replace('\\r','')\n\t\tfor j in range(len(org_seq_pred_0)):\n\t\t\tif org_seq_pred_0[j][0] == 
find_id:\n\t\t\t\tpred_final_ordered_0.append(org_seq_pred_0[j][-1][:len(data_file[i][1])])\n\t\t\t\tpred_final_ordered_1.append(org_seq_pred_1[j][-1][:len(data_file[i][1])])\n\t\t\t\tpred_final_ordered_2.append(org_seq_pred_2[j][-1][:len(data_file[i][1])])\n\t\t\t\tpred_final_ordered_3.append(org_seq_pred_3[j][-1][:len(data_file[i][1])])\n\t\t\t\tpred_final_ordered_4.append(org_seq_pred_4[j][-1][:len(data_file[i][1])])\n\t\t\t\tpred_final_ordered_5.append(org_seq_pred_5[j][-1][:len(data_file[i][1])])\n\t\t\t\tpred_final_ordered_6.append(org_seq_pred_6[j][-1][:len(data_file[i][1])])\n\n\t\n\twrite_file = open(file_name,\"w\")\n\n\tfor i in range(len(data_file)):\n\t\twrite_file.write(data_file[i][0]+'\\n')\n\t\twrite_file.write(data_file[i][1]+'\\n')\n\t\tone_seq_len = len(data_file[i][1].replace('\\r',''))\n\t\tpred_0 = [round(j,4) for j in pred_final_ordered_0[i]]\n\t\tpred_1 = [round(j,4) for j in pred_final_ordered_1[i]]\n\t\tpred_2 = [round(j,4) for j in pred_final_ordered_2[i]]\n\t\tpred_3 = [round(j,4) for j in pred_final_ordered_3[i]]\n\t\tpred_4 = [round(j,4) for j in pred_final_ordered_4[i]]\n\t\tpred_5 = [round(j,4) for j in pred_final_ordered_5[i]]\n\t\tpred_6 = [round(j,4) for j in pred_final_ordered_6[i]]\n\t\tpred_0 = pred_0[0:one_seq_len]\n\t\tpred_1 = pred_1[0:one_seq_len]\n\t\tpred_2 = pred_2[0:one_seq_len]\n\t\tpred_3 = pred_3[0:one_seq_len]\n\t\tpred_4 = pred_4[0:one_seq_len]\n\t\tpred_5 = pred_5[0:one_seq_len]\n\t\tpred_6 = pred_6[0:one_seq_len]\n\n\t\tif output_type == 'b':\n\t\t\t# best ROC performance\n\t\t\tpred_0 = [1 if p > 0.2340 else 0 for p in pred_0]\n\t\t\tpred_1 = [1 if p > 0.1678 else 0 for p in pred_1]\n\t\t\tpred_2 = [1 if p > 0.0163 else 0 for p in pred_2]\n\t\t\tpred_3 = [1 if p > 0.006 else 0 for p in pred_3]\n\t\t\tpred_4 = [1 if p > 0.0011 else 0 for p in pred_4]\n\t\t\tpred_5 = [1 if p > 0.0109 else 0 for p in pred_5]\n\t\t\tpred_6 = [1 if p > 0.0254 else 0 for p in pred_6]\n\t\t\twrite_file.write(\"\".join(str(j) for j in pred_0))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\"\".join(str(j) for j in pred_1))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\"\".join(str(j) for j in pred_2))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\"\".join(str(j) for j in pred_3))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\"\".join(str(j) for j in pred_4))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\"\".join(str(j) for j in pred_5))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\"\".join(str(j) for j in pred_6))\n\t\t\twrite_file.write('\\n')\n\t\telse:\n\t\t\twrite_file.write(\",\".join(str(j) for j in pred_0))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\",\".join(str(j) for j in pred_1))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\",\".join(str(j) for j in pred_2))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\",\".join(str(j) for j in pred_3))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\",\".join(str(j) for j in pred_4))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\",\".join(str(j) for j in pred_5))\n\t\t\twrite_file.write('\\n')\n\t\t\twrite_file.write(\",\".join(str(j) for j in pred_6))\n\t\t\twrite_file.write('\\n')\n\t\t\n\tprint(\"Find results : \",file_name)\n\twrite_file.close()"
}
] | import numpy as np
import random
import os
import torch as t
import sys
from args import Args_config
from prepare_model_data import data_2_samples, Batches_data
from load_data import load_file_2_data
from torch import nn
from model import Seq2FUN
from evaluator import write_2_file | 5,843 | # -*- coding: utf-8 -*-
# @Author: Yihe Pang
# @Date: 2023-06-13 10:08:51
# @Last Modified by: Yihe Pang
# @Last Modified time: 2023-06-14 22:43:30
def FLAG_model_running(input_data_file, output_file_name, output_type):
args = Args_config()
test_data = data_2_samples(args = args,
data_file_name = input_data_file,
is_slice = True)
for root, dirs, files in os.walk(args.model_path):
for one_file in files:
model_file = args.model_path+'/'+one_file
# print("model_file:",model_file)
model = t.load(model_file, map_location='cpu')
# print("Model : ------",model)
model.eval()
if len(test_data) < args.batch_size:
input_data = []
for i in range(args.batch_size):
if i < len(test_data):
input_data.append(test_data[i])
else:
input_data.append(test_data[0])
else:
input_data = test_data
test_batches = Batches_data(test_data, args.batch_size, is_train=False)
IDR_probs = []
PB_probs = []
DB_probs = []
RB_probs = []
IB_probs = []
LB_probs = []
Link_probs = []
            for t_batch in test_batches: # one batch
t_input_featues = t.tensor(np.array(t_batch.seq_T5_feature))
# seq_mask
one_seq_mask = t.tensor(np.array(t_batch.seq_mask), dtype=t.float32)
one_IDR_probs, one_PB_probs, one_DB_probs, one_RB_probs, one_IB_probs, one_LB_probs, one_Link_probs = model(t_input_featues)
# logits
one_IDR_logits = one_IDR_probs * one_seq_mask
one_PB_logits = one_PB_probs * one_seq_mask
one_DB_logits = one_DB_probs * one_seq_mask
one_RB_logits = one_RB_probs * one_seq_mask
one_IB_logits = one_IB_probs * one_seq_mask
one_LB_logits = one_LB_probs * one_seq_mask
one_Link_logits = one_Link_probs * one_seq_mask
IDR_probs.append(one_IDR_probs.detach().numpy())
PB_probs.append(one_PB_logits.detach().numpy())
DB_probs.append(one_DB_logits.detach().numpy())
RB_probs.append(one_RB_logits.detach().numpy())
IB_probs.append(one_IB_logits.detach().numpy())
LB_probs.append(one_LB_logits.detach().numpy())
Link_probs.append(one_Link_logits.detach().numpy())
test_file = load_file_2_data(input_data_file)
| # -*- coding: utf-8 -*-
# @Author: Yihe Pang
# @Date: 2023-06-13 10:08:51
# @Last Modified by: Yihe Pang
# @Last Modified time: 2023-06-14 22:43:30
def FLAG_model_running(input_data_file, output_file_name, output_type):
args = Args_config()
test_data = data_2_samples(args = args,
data_file_name = input_data_file,
is_slice = True)
for root, dirs, files in os.walk(args.model_path):
for one_file in files:
model_file = args.model_path+'/'+one_file
# print("model_file:",model_file)
model = t.load(model_file, map_location='cpu')
# print("Model : ------",model)
model.eval()
if len(test_data) < args.batch_size:
input_data = []
for i in range(args.batch_size):
if i < len(test_data):
input_data.append(test_data[i])
else:
input_data.append(test_data[0])
else:
input_data = test_data
test_batches = Batches_data(test_data, args.batch_size, is_train=False)
IDR_probs = []
PB_probs = []
DB_probs = []
RB_probs = []
IB_probs = []
LB_probs = []
Link_probs = []
            for t_batch in test_batches: # one batch
t_input_featues = t.tensor(np.array(t_batch.seq_T5_feature))
# seq_mask
one_seq_mask = t.tensor(np.array(t_batch.seq_mask), dtype=t.float32)
one_IDR_probs, one_PB_probs, one_DB_probs, one_RB_probs, one_IB_probs, one_LB_probs, one_Link_probs = model(t_input_featues)
# logits
one_IDR_logits = one_IDR_probs * one_seq_mask
one_PB_logits = one_PB_probs * one_seq_mask
one_DB_logits = one_DB_probs * one_seq_mask
one_RB_logits = one_RB_probs * one_seq_mask
one_IB_logits = one_IB_probs * one_seq_mask
one_LB_logits = one_LB_probs * one_seq_mask
one_Link_logits = one_Link_probs * one_seq_mask
IDR_probs.append(one_IDR_probs.detach().numpy())
PB_probs.append(one_PB_logits.detach().numpy())
DB_probs.append(one_DB_logits.detach().numpy())
RB_probs.append(one_RB_logits.detach().numpy())
IB_probs.append(one_IB_logits.detach().numpy())
LB_probs.append(one_LB_logits.detach().numpy())
Link_probs.append(one_Link_logits.detach().numpy())
test_file = load_file_2_data(input_data_file) | write_2_file(test_file, test_data, test_batches, IDR_probs, PB_probs, DB_probs, RB_probs, IB_probs, LB_probs, Link_probs, output_file_name, output_type) | 5 | 2023-11-09 15:08:24+00:00 | 8k |
BouncyKoishi/ChuCaoQi-Bot | plugins/spellcard_battle.py | [
{
"identifier": "Battle",
"path": "plugins/scBattle/scBattleObj.py",
"snippet": "class Battle:\n def __init__(self, creatorId, groupId) -> None:\n self.creatorId = creatorId\n self.joinerId = None\n self.creator: Battler or None = None\n self.joiner: Battler or None = None\n self.lastTurnInfoImg = None\n self.groupId = groupId\n self.gameRound = None\n self.stateCardId = 0\n self.spellCardSettled = []\n self.turnInfoMsg, self.creatorCardMsg, self.joinerCardMsg = \"\", \"\", \"\"\n self.cAtk, self.jAtk, self.cHurt, self.jHurt = 0, 0, 0, 0\n\n async def setCreator(self):\n creatorName = await getBattlerName(self.creatorId, self.groupId)\n self.creator = Battler(self.creatorId, creatorName)\n\n async def joinBattle(self, joinerId) -> None:\n self.joinerId = joinerId\n joinerName = await getBattlerName(self.joinerId, self.groupId)\n self.joiner = Battler(self.joinerId, joinerName)\n self.creator.setEnemy(self.joiner)\n self.joiner.setEnemy(self.creator)\n self.gameRound = 0\n\n async def setSingleBattleEnemy(self, enemyName, enemyCardList):\n self.joinerId = -1\n self.joiner = Battler(self.joinerId, enemyName)\n self.joiner.cardIdList = enemyCardList\n self.creator.setEnemy(self.joiner)\n self.joiner.setEnemy(self.creator)\n self.spellCardSettled.append(self.joinerId)\n self.gameRound = 0\n\n def gameStart(self) -> None:\n self.creator.setNewMainCard()\n self.joiner.setNewMainCard()\n self.roundStart()\n self.creatorCardMsg = self.creator.getCardDescribe()\n self.joinerCardMsg = self.joiner.getCardDescribe()\n self.turnInfoMsg += self.creator.nowCard.onCardSet()\n self.turnInfoMsg += self.joiner.nowCard.onCardSet()\n self.turnInfoMsg += self.creator.runEffect(\"onCardSet\")\n self.turnInfoMsg += self.joiner.runEffect(\"onCardSet\")\n self.turnInfoMsg += f'-------------------------------------------------------\\n'\n\n def roundStart(self):\n self.gameRound += 1\n self.turnInfoMsg += f'-- 宣言回目 {self.gameRound} --\\n'\n self.turnInfoMsg += f'{self.creator.name} 当前血量:{self.creator.nowHp}\\n'\n self.turnInfoMsg += f'{self.joiner.name} 当前血量:{self.joiner.nowHp}\\n'\n \n def turnStart(self):\n self.turnInfoMsg += self.creator.nowCard.onTurnStart()\n self.turnInfoMsg += self.joiner.nowCard.onTurnStart()\n self.turnInfoMsg += self.creator.runEffect('onTurnStart')\n self.turnInfoMsg += self.joiner.runEffect('onTurnStart')\n\n def turnGetBasePoint(self):\n self.cAtk, pointMsg1 = self.creator.getPoints()\n self.jAtk, pointMsg2 = self.joiner.getPoints()\n self.turnInfoMsg += (pointMsg1 + pointMsg2)\n\n def turnHurtValueCalc(self):\n self.cHurt, hurtMsg1 = self.creator.calcHurt(self.jAtk)\n self.jHurt, hurtMsg2 = self.joiner.calcHurt(self.cAtk)\n self.turnInfoMsg += (hurtMsg1 + hurtMsg2)\n\n def turnHpChange(self):\n self.turnInfoMsg += self.creator.battleHurt(self.cHurt)\n self.turnInfoMsg += self.joiner.battleHurt(self.jHurt)\n\n def turnEnd(self):\n self.turnInfoMsg += self.creator.runEffect('onTurnEnd')\n self.turnInfoMsg += self.joiner.runEffect('onTurnEnd')\n self.turnInfoMsg += self.creator.nowCard.onTurnEnd()\n self.turnInfoMsg += self.joiner.nowCard.onTurnEnd()\n self.turnInfoMsg += f'-------------------------------------------------------\\n'\n self.cleanTurnTempData()\n\n def cleanTurnTempData(self):\n self.creator.cleanTurnTempData()\n self.joiner.cleanTurnTempData()\n self.cAtk, self.jAtk, self.cHurt, self.jHurt = 0, 0, 0, 0\n\n def cardBreakJudge(self):\n creatorBreak = self.creator.shouldChangeCard()\n joinerBreak = self.joiner.shouldChangeCard()\n if creatorBreak and joinerBreak:\n self.turnInfoMsg += 
f'-------------------------------------------------------\\n'\n self.turnInfoMsg += f'{self.creator.name} 当前符卡被击破!\\n'\n self.turnInfoMsg += self.creator.runEffect(\"onCardBreak\")\n self.turnInfoMsg += self.joiner.runEffect(\"onEnemyCardBreak\")\n self.turnInfoMsg += self.creator.nowCard.onCardBreak()\n self.turnInfoMsg += f'{self.joiner.name} 当前符卡被击破!\\n'\n self.turnInfoMsg += self.joiner.runEffect(\"onCardBreak\")\n self.turnInfoMsg += self.creator.runEffect(\"onEnemyCardBreak\")\n self.turnInfoMsg += self.joiner.nowCard.onCardBreak()\n self.lastTurnInfoImg = self.getTurnInfoImg()\n self.cleanTurnTempData()\n time.sleep(4)\n gameContinueA = self.creator.setNewMainCard()\n gameContinueB = self.joiner.setNewMainCard()\n if not gameContinueA or not gameContinueB:\n return True, True\n self.roundStart()\n self.creatorCardMsg = self.creator.getCardDescribe()\n self.joinerCardMsg = self.joiner.getCardDescribe()\n self.turnInfoMsg += self.creator.nowCard.onCardSet()\n self.turnInfoMsg += self.joiner.nowCard.onCardSet()\n self.turnInfoMsg += self.creator.runEffect(\"onCardSet\")\n self.turnInfoMsg += self.joiner.runEffect(\"onCardSet\")\n self.turnInfoMsg += f'-------------------------------------------------------\\n'\n return True, False\n elif creatorBreak:\n self.turnInfoMsg += f'-------------------------------------------------------\\n'\n self.turnInfoMsg += f'{self.creator.name} 当前符卡被击破!\\n'\n self.turnInfoMsg += self.creator.runEffect(\"onCardBreak\")\n self.turnInfoMsg += self.joiner.runEffect(\"onEnemyCardBreak\")\n self.turnInfoMsg += self.creator.nowCard.onCardBreak()\n self.lastTurnInfoImg = self.getTurnInfoImg()\n self.cleanTurnTempData()\n time.sleep(4)\n gameContinue = self.creator.setNewMainCard()\n if not gameContinue:\n return True, True\n self.roundStart()\n self.creatorCardMsg = self.creator.getCardDescribe()\n self.turnInfoMsg += self.creator.nowCard.onCardSet()\n self.turnInfoMsg += self.creator.runEffect(\"onCardSet\")\n self.turnInfoMsg += f'-------------------------------------------------------\\n'\n return True, False\n elif joinerBreak:\n self.turnInfoMsg += f'-------------------------------------------------------\\n'\n self.turnInfoMsg += f'{self.joiner.name} 当前符卡被击破!\\n'\n self.turnInfoMsg += self.joiner.runEffect(\"onCardBreak\")\n self.turnInfoMsg += self.creator.runEffect(\"onEnemyCardBreak\")\n self.turnInfoMsg += self.joiner.nowCard.onCardBreak()\n self.lastTurnInfoImg = self.getTurnInfoImg()\n self.cleanTurnTempData()\n time.sleep(4)\n gameContinue = self.joiner.setNewMainCard()\n if not gameContinue:\n return True, True\n self.roundStart()\n self.joinerCardMsg = self.joiner.getCardDescribe()\n self.turnInfoMsg += self.joiner.nowCard.onCardSet()\n self.turnInfoMsg += self.joiner.runEffect(\"onCardSet\")\n self.turnInfoMsg += f'-------------------------------------------------------\\n'\n return True, False\n return False, False\n\n def getTurnInfoImg(self):\n sizeBig = 25\n sizeMid = 20\n sizeSmall = 15\n rowSpacing = 3\n width = 900\n margin = 20\n font1 = ImageFont.truetype(\"HarmonyOS_Sans_SC_Bold\", sizeBig)\n font2 = ImageFont.truetype(\"HarmonyOS_Sans_SC_Regular\", sizeMid)\n font3 = ImageFont.truetype(\"HarmonyOS_Sans_SC_Light\", sizeSmall)\n baseHeight = sizeBig + sizeMid * 6 + rowSpacing * 6 + margin * 2\n turnInfoMsgLineCount = self.turnInfoMsg.count('\\n') + 1\n turnInfoMsgHeight = (sizeSmall + rowSpacing) * turnInfoMsgLineCount + margin * 2\n totalHeight = baseHeight + turnInfoMsgHeight\n\n img = Image.new(mode=\"RGB\", size=(width, 
totalHeight), color=(255, 255, 255))\n draw = ImageDraw.Draw(img)\n draw.text((margin, margin), self.creator.name, font=font1, fill=(96, 16, 16))\n draw.text((margin, margin + sizeBig + rowSpacing), self.creatorCardMsg, font=font2, fill=(96, 16, 16))\n draw.text((width / 2, margin), self.joiner.name, font=font1, fill=(16, 16, 96))\n draw.text((width / 2, margin + sizeBig + rowSpacing), self.joinerCardMsg, font=font2, fill=(16, 16, 96))\n draw.line(xy=(margin, baseHeight, width - margin, baseHeight), fill=(100, 100, 100), width=2)\n draw.text((margin, baseHeight + margin), self.turnInfoMsg, font=font3, fill=(0, 0, 0))\n img.save(\"temp.jpg\", format=\"JPEG\", quality=95)\n self.turnInfoMsg = \"\"\n return getImgBase64(r\"temp.jpg\")\n\n def endGameCheck(self):\n isEndGame = False\n loserName = []\n if self.creator.shouldEnd():\n isEndGame = True\n loserName.append(self.creator.name)\n if self.joiner.shouldEnd():\n isEndGame = True\n loserName.append(self.joiner.name)\n time.sleep(4)\n return isEndGame, loserName"
},
{
"identifier": "Battler",
"path": "plugins/scBattle/scBattlerObj.py",
"snippet": "class Battler:\n def __init__(self, userId, userName) -> None:\n self.id = userId\n self.name = userName\n self.cardIdList = [0, 0, 0, 0, 0]\n self.nowCardOrder = 0\n self.states = []\n self.effects = []\n self.nowHp = 0\n self.nowCard = None\n self.enemy = None\n self.attack, self.defence, self.dodge = 0, 0, 0\n self.dodSuccess, self.defSuccess = None, None\n\n def setEnemy(self, enemy):\n self.enemy = enemy\n\n def battleHurt(self, value):\n value, beforeHurtInfo = self.runEffect(\"beforeHurt\", value)\n self.nowHp -= value\n _, commonHurtInfo = self.runEffect(\"onHurt\", value)\n _, battleHurtInfo = self.runEffect(\"onBattleHurt\", value)\n return beforeHurtInfo + commonHurtInfo + battleHurtInfo\n\n def effectHurt(self, value):\n value, beforeHurtInfo = self.runEffect(\"beforeHurt\", value)\n self.nowHp -= value\n _, commonHurtInfo = self.runEffect(\"onHurt\", value)\n _, effectHurtInfo = self.runEffect(\"onEffectHurt\", value)\n return beforeHurtInfo + commonHurtInfo + effectHurtInfo\n\n def heal(self, value):\n value, healInfo = self.runEffect(\"onHealValueCalc\", value)\n self.nowHp += value\n self.nowHp = min(self.nowHp, self.nowCard.cardHp)\n healInfo += self.runEffect(\"onHeal\")\n return healInfo\n\n def appendEffect(self, effectId, effectAmount):\n if not effectId:\n return\n effectIdList = [effect.id for effect in self.effects] if self.effects else []\n if effectId in effectIdList:\n self.effects[effectIdList.index(effectId)].stackEffect(effectAmount)\n else:\n effect = utils.getEffectObjById(effectId, effectAmount)\n effect.setPlayerInfo(self, self.enemy)\n self.effects.append(effect)\n\n def appendBorder(self, effectId, borderTurn, borderStrength):\n if not effectId:\n return\n effectIdList = [effect.id for effect in self.effects] if self.effects else []\n if effectId in effectIdList:\n self.effects[effectIdList.index(effectId)].stackEffect(borderTurn)\n else:\n border = utils.getEffectObjById(effectId, borderTurn)\n border.setPlayerInfo(self, self.enemy)\n border.setBorderStrength(borderStrength)\n self.effects.append(border)\n\n def removeEffect(self, effectId, effectAmount=0):\n if not effectId:\n return\n effectIdList = [effect.id for effect in self.effects] if self.effects else []\n if effectId in effectIdList:\n if effectAmount == 0:\n self.effects.pop(effectIdList.index(effectId))\n else:\n self.effects[effectIdList.index(effectId)].reduceEffect(effectAmount)\n self.removeEmptyEffect()\n\n def removeEmptyEffect(self):\n for effect in self.effects:\n if effect.effectAmount == 0:\n self.effects.remove(effect)\n\n def runEffect(self, funcName, *args):\n effectInfoMsgs = []\n for effect in self.effects:\n if \"Froze\" in self.states:\n if effect.id != \"Freeze\" and not isinstance(effect, AbstractBorder):\n continue\n func = getattr(effect, funcName)\n args = func(*args)\n args = () if args is None else args\n args = (args, ) if not isinstance(args, tuple) else args\n if effect.effectInfoMsg:\n effectInfoMsgs.append(effect.effectInfoMsg)\n effect.effectInfoMsg = \"\"\n effectInfoMsgs = \"\\n\".join(effectInfoMsgs)\n self.removeEmptyEffect()\n if len(args) == 0:\n return effectInfoMsgs\n if len(args) == 1:\n return args[0], effectInfoMsgs\n # 多于一个参数的情况,需要用tuple接收返回参数\n return args, effectInfoMsgs\n\n def getPoints(self):\n self.attack = utils.runDiceByString(self.nowCard.atkPoint)\n self.attack, atkInfo = self.runEffect(\"onAttackCalc\", self.attack)\n self.defence = utils.runDiceByString(self.nowCard.defPoint)\n self.defence, defInfo = 
self.runEffect(\"onDefenceCalc\", self.defence)\n self.dodge = utils.runDiceByString(self.nowCard.dodPoint)\n self.dodge, dodInfo = self.runEffect(\"onDodgeCalc\", self.dodge)\n self.attack, self.defence, self.dodge = max(self.attack, 0), max(self.defence, 0), max(self.dodge, 0)\n pointInfo = atkInfo + defInfo + dodInfo + f'{self.name} Hp:{self.nowHp} Atk:{self.attack} Def:{self.defence} Dod:{self.dodge}\\n'\n return self.attack, pointInfo\n\n def calcHurt(self, enemyAtk):\n dodSuccess = True if self.dodge >= enemyAtk else False\n dodSuccess, dodInfo = self.runEffect('onDodgeSuccessJudge', dodSuccess)\n dodSuccess, enemyDodInfo = self.enemy.runEffect('onEnemyDodgeSuccessJudge', dodSuccess)\n defSuccess = True\n defSuccess, defInfo = self.runEffect('onDefenceSuccessJudge', defSuccess)\n defSuccess, enemyDefInfo = self.enemy.runEffect('onEnemyDefenceSuccessJudge', defSuccess)\n self.dodSuccess, self.defSuccess = dodSuccess, defSuccess\n hurtValue = 0 if dodSuccess else max(enemyAtk - self.defence, 1) if defSuccess else enemyAtk\n hurtValue, hurtInfo = self.runEffect('onHurtValueCalc', hurtValue)\n hurtValue, enemyHurtInfo = self.enemy.runEffect('onEnemyHurtValueCalc', hurtValue)\n hurtValue = 0 if hurtValue < 0 else hurtValue\n calcInfo = dodInfo + enemyDodInfo + defInfo + enemyDefInfo + hurtInfo + enemyHurtInfo + f'{self.name} 预计受伤:{hurtValue} \\n'\n return hurtValue, calcInfo\n\n def getCardDescribe(self):\n return self.nowCard.getCardDescribe(self.nowCardOrder)\n\n def cleanTurnTempData(self):\n self.attack, self.defence, self.dodge = 0, 0, 0\n self.dodSuccess, self.defSuccess = None, None\n\n def shouldChangeCard(self):\n return self.nowHp <= 0\n\n def shouldEnd(self):\n return self.nowCardOrder > 5\n\n def setNewMainCard(self):\n self.nowCardOrder += 1\n if self.nowCardOrder > 5:\n return False\n nowCardId = self.cardIdList[self.nowCardOrder - 1]\n self.nowCard = utils.getCardObjById(nowCardId)\n self.nowCard.setPlayerInfo(self, self.enemy)\n self.nowHp = self.nowCard.cardHp\n print(self.nowCard)\n return True"
}
] | from plugins.scBattle.scBattleObj import Battle
from plugins.scBattle.scBattlerObj import Battler
from nonebot import on_command, CommandSession
import plugins.scBattle.scBattleUtils as utils
import dbConnection.kusa_item as itemDB
import re
import string
import codecs
import nonebot | 6,327 |
async def battleMain(battle: Battle):
await sendTitle(battle.creatorId)
await sendTitle(battle.joinerId)
print('BeforeGameStart:' + str(battleList))
battle.gameStart()
print('OnGameStart:' + str(battleList))
gameBreak = False
while not gameBreak:
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnStart()
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnGetBasePoint()
battle.turnHurtValueCalc()
battle.turnHpChange()
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnEnd()
print('OnMainCycleEnd:' + str(battleList))
endGame, loserName = battle.endGameCheck()
await battleEnd(battle, loserName)
async def battleEnd(battle: Battle, loserName):
global battleList
message = ''
if len(loserName) == 1:
message = f"{loserName[0]} 已被击破!"
elif len(loserName) == 2:
message = f"{loserName[0]} 和 {loserName[1]} 同时被对方击破!"
await bot.send_group_msg(group_id=battle.groupId, message=message)
print('BeforeEndGame:' + str(battleList))
battleList.pop(battle.creatorId)
async def sendTitle(userId):
titleExist = await itemDB.getItemAmount(userId, '早期符卡对战者')
if titleExist == 0:
await itemDB.changeItemAmount(userId, '早期符卡对战者', 1)
@on_command(name='符卡查询', only_to_me=False)
async def showCardInfo(session: CommandSession):
cardId = session.current_arg_text.strip()
cardId = int(cardId)
card = utils.getCardObjById(cardId)
if not card:
await session.send('没有查询到id对应的符卡信息!')
return
await session.send(card.getCardDescribe(cardId))
@on_command(name='符卡配置', only_to_me=False)
async def setCard(session: CommandSession):
userId = session.ctx['user_id']
argText = session.current_arg_text.strip()
isRandom = True if 'random' in argText.lower() else False
regex = r'\d+ \d+ \d+ \d+ \d+'
argMatch = re.search(regex, argText)
if not argMatch and not isRandom:
with codecs.open(u'text/符卡配置帮助.txt', 'r', 'utf-8') as f:
await session.send(f.read().strip())
return
battle = inBattle(userId)
if not battle:
await session.send('您不在一场符卡对战中!')
return
if battle.gameRound is not None and battle.gameRound != 0:
await session.send('对战已开始,不可中途更换符卡!')
return
if isRandom:
setCardSuccess, cost = setCardInRandom(battle, userId)
else:
setCardSuccess, cost = setCardByCardString(battle, userId, argText)
if not setCardSuccess:
if cost is None:
await session.send('符卡配置失败:你选择的某个编号不存在对应符卡。')
else:
await session.send(f'符卡配置失败:你选择的符卡Cost总和为{cost}, 超出Cost上限:7')
return
await session.send(f'符卡配置成功!选择的符卡总Cost为{cost}')
print('BeforeSetCard:' + str(battleList))
if userId not in battle.spellCardSettled:
battle.spellCardSettled.append(userId)
if len(battle.spellCardSettled) == 1:
info = '一位玩家完成了符卡配置!等待另一位玩家。'
await bot.send_group_msg(group_id=battle.groupId, message=info)
print('OnSetCard1:' + str(battleList))
elif len(battle.spellCardSettled) == 2:
info = '所有玩家已完成符卡配置,对战启动中……'
await bot.send_group_msg(group_id=battle.groupId, message=info)
print('OnSetCard2:' + str(battleList))
await battleMain(battle)
def setCardByCardString(battle: Battle, userId: int, argText: string):
battler = battle.creator if userId == battle.creatorId else battle.joiner
mainCardList = list(map(lambda x: int(x), argText.split(" ")))
return setCardByIdList(battler, mainCardList)
def setCardInRandom(battle: Battle, userId: int):
battler = battle.creator if userId == battle.creatorId else battle.joiner
cardIdList = utils.getRandomCardIdList()
return setCardByIdList(battler, cardIdList)
|
bot = nonebot.get_bot()
battleList = {}
def inBattle(qq) -> Battle or None:
for battle in battleList.values():
if battle.creatorId == qq or battle.joinerId == qq:
return battle
return None
def waitingBattleQQList() -> list:
waitingList = []
for battle in battleList.values():
if not battle.joinerId:
waitingList.append(battle.creatorId)
return waitingList
@on_command(name='符卡对战', only_to_me=False)
async def _(session: CommandSession):
global battleList
userId = session.ctx['user_id']
groupId = session.ctx['group_id']
if inBattle(userId):
await session.send('您已经在一场符卡对战中!')
return
print('BeforeOpen:' + str(battleList))
newBattle = Battle(userId, groupId)
await newBattle.setCreator()
battleList[userId] = newBattle
await session.send('已创建对战,其他人可使用 !加入符卡对战 [对方qq号] 指令加入本场对战。')
print('OnOpen:' + str(battleList))
@on_command(name='取消符卡对战', only_to_me=False)
async def _(session: CommandSession):
global battleList
userId = session.ctx['user_id']
battle = inBattle(userId)
if not battle:
await session.send('您不在一场符卡对战中!')
return
if battle.gameRound:
await session.send('对战已经开始,无法取消。')
return
battleList.pop(userId)
await session.send('已取消对战。')
@on_command(name='加入符卡对战', only_to_me=False)
async def join(session: CommandSession):
global battleList
userId = session.ctx['user_id']
if inBattle(userId):
await session.send('您已经在一场符卡对战中!')
return
argId = session.current_arg_text.strip()
if not argId:
waitingList = waitingBattleQQList()
if len(waitingList) == 0:
await session.send('当前没有正在等待加入的对战。')
return
if len(waitingList) == 1:
argId = waitingList[0]
else:
await session.send('当前有多场对战正在等待加入,请指定开启对战方的qq号。')
return
battle = inBattle(int(argId))
if not battle:
await session.send('该符卡对战未开启。')
return
if battle.joinerId:
await session.send('该符卡对战人员已满。')
return
print('BeforeJoin:' + str(battleList))
await battle.joinBattle(userId)
await session.send(f'加入对战成功!等待双方配置符卡……\n使用“!符卡配置”指令以进行配置,建议私聊配置\n当前所有符卡列表:https://docs.qq.com/sheet/DSHNYTW9mWEhTVWJx')
battleList[int(argId)] = battle
print('OnJoin:' + str(battleList))
async def battleMain(battle: Battle):
await sendTitle(battle.creatorId)
await sendTitle(battle.joinerId)
print('BeforeGameStart:' + str(battleList))
battle.gameStart()
print('OnGameStart:' + str(battleList))
gameBreak = False
while not gameBreak:
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnStart()
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnGetBasePoint()
battle.turnHurtValueCalc()
battle.turnHpChange()
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnEnd()
print('OnMainCycleEnd:' + str(battleList))
endGame, loserName = battle.endGameCheck()
await battleEnd(battle, loserName)
async def battleEnd(battle: Battle, loserName):
global battleList
message = ''
if len(loserName) == 1:
message = f"{loserName[0]} 已被击破!"
elif len(loserName) == 2:
message = f"{loserName[0]} 和 {loserName[1]} 同时被对方击破!"
await bot.send_group_msg(group_id=battle.groupId, message=message)
print('BeforeEndGame:' + str(battleList))
battleList.pop(battle.creatorId)
async def sendTitle(userId):
titleExist = await itemDB.getItemAmount(userId, '早期符卡对战者')
if titleExist == 0:
await itemDB.changeItemAmount(userId, '早期符卡对战者', 1)
@on_command(name='符卡查询', only_to_me=False)
async def showCardInfo(session: CommandSession):
cardId = session.current_arg_text.strip()
cardId = int(cardId)
card = utils.getCardObjById(cardId)
if not card:
await session.send('没有查询到id对应的符卡信息!')
return
await session.send(card.getCardDescribe(cardId))
@on_command(name='符卡配置', only_to_me=False)
async def setCard(session: CommandSession):
userId = session.ctx['user_id']
argText = session.current_arg_text.strip()
isRandom = True if 'random' in argText.lower() else False
regex = r'\d+ \d+ \d+ \d+ \d+'
argMatch = re.search(regex, argText)
if not argMatch and not isRandom:
with codecs.open(u'text/符卡配置帮助.txt', 'r', 'utf-8') as f:
await session.send(f.read().strip())
return
battle = inBattle(userId)
if not battle:
await session.send('您不在一场符卡对战中!')
return
if battle.gameRound is not None and battle.gameRound != 0:
await session.send('对战已开始,不可中途更换符卡!')
return
if isRandom:
setCardSuccess, cost = setCardInRandom(battle, userId)
else:
setCardSuccess, cost = setCardByCardString(battle, userId, argText)
if not setCardSuccess:
if cost is None:
await session.send('符卡配置失败:你选择的某个编号不存在对应符卡。')
else:
await session.send(f'符卡配置失败:你选择的符卡Cost总和为{cost}, 超出Cost上限:7')
return
await session.send(f'符卡配置成功!选择的符卡总Cost为{cost}')
print('BeforeSetCard:' + str(battleList))
if userId not in battle.spellCardSettled:
battle.spellCardSettled.append(userId)
if len(battle.spellCardSettled) == 1:
info = '一位玩家完成了符卡配置!等待另一位玩家。'
await bot.send_group_msg(group_id=battle.groupId, message=info)
print('OnSetCard1:' + str(battleList))
elif len(battle.spellCardSettled) == 2:
info = '所有玩家已完成符卡配置,对战启动中……'
await bot.send_group_msg(group_id=battle.groupId, message=info)
print('OnSetCard2:' + str(battleList))
await battleMain(battle)
def setCardByCardString(battle: Battle, userId: int, argText: string):
battler = battle.creator if userId == battle.creatorId else battle.joiner
mainCardList = list(map(lambda x: int(x), argText.split(" ")))
return setCardByIdList(battler, mainCardList)
def setCardInRandom(battle: Battle, userId: int):
battler = battle.creator if userId == battle.creatorId else battle.joiner
cardIdList = utils.getRandomCardIdList()
return setCardByIdList(battler, cardIdList)
| def setCardByIdList(battler: Battler, cardIdList: list): | 1 | 2023-11-02 04:06:31+00:00 | 8k |
ilur98/DGQ | dgq/entry.py | [
{
"identifier": "PTQ",
"path": "dgq/quant/quant_sequence.py",
"snippet": "@torch.no_grad()\ndef PTQ(model, enc, \n qconfig, \n nsamples=128, seqlen=2048):\n dev = \"cuda:0\"\n layers = get_blocks(model)\n layer_kwargs = {}\n cache={'i': 0}\n layers[0] = layers[0].cuda()\n move_embed(model, dev)\n\n dtype = next(iter(model.parameters())).dtype\n inps = torch.zeros((nsamples, seqlen, model.config.hidden_size), dtype=dtype, device=dev)\n outs = torch.zeros_like(inps)\n class Catcher(nn.Module):\n\n def __init__(self, module):\n super().__init__()\n self.module = module\n\n def forward(self, inp, **kwargs):\n inps[cache['i']] = inp\n cache['i'] += 1\n layer_kwargs.update(kwargs)\n raise ValueError\n \n layers[0] = Catcher(layers[0])\n for batch in enc:\n try:\n model(batch[0].to(dev))\n except ValueError:\n pass\n del enc\n layers[0] = layers[0].module # restore\n # inps = inps[0]\n\n layers[0] = layers[0].cpu()\n move_embed(model, \"cpu\")\n for i in range(len(layers)):\n print(i)\n layer = layers[i].to(dev)\n full = find_layers(layer, [QuantLinear])\n sequential = [list(full.keys())]\n set_quant_state(layer, False, False)\n prepare_hook(layer, inps, qconfig, layer_kwargs)\n if qconfig[\"meanact\"]:\n mean_bias(layer)\n if qconfig[\"smoothquant\"]:\n smooth_module(layer)\n if qconfig[\"kvquant\"]:\n kvquant(layer)\n for names in sequential:\n subset = {n: full[n] for n in names}\n helpers = {}\n for name in subset:\n helpers[name] = QuantizerHelper(subset[name])\n helpers[name].quantizer.configure(qconfig[\"wt_quant\"][\"bits\"], perchannel=True, sym=False, mse=False)\n def add_batch(name):\n def tmp(_, inp, out):\n helpers[name].add_batch(inp[0].data, out.data)\n\n return tmp\n\n handles = []\n for name in subset:\n handles.append(subset[name].register_forward_hook(add_batch(name)))\n for j in range(nsamples):\n outs[j] = layer(inps[j].unsqueeze(0), **layer_kwargs)[0]\n for h in handles:\n h.remove()\n \n for name in subset:\n if qconfig[\"wt_quant\"][\"method\"] == \"gptq\":\n scale, zero = helpers[name].gptqquant(percdamp=qconfig[\"percdamp\"], groupsize=qconfig[\"wt_quant\"][\"groupsize\"], actorder=qconfig[\"act_order\"], name=name)\n elif qconfig[\"wt_quant\"][\"method\"] == \"search\":\n scale, zero, scale8 = helpers[name].searchquant(groupsize=qconfig[\"wt_quant\"][\"groupsize\"], W4W8=qconfig[\"wt_quant\"][\"w4w8\"])\n elif qconfig[\"wt_quant\"][\"method\"] == \"naive\":\n scale, zero = helpers[name].naivequant(groupsize=qconfig[\"wt_quant\"][\"groupsize\"])\n else:\n raise NotImplemented\n if qconfig[\"wt_quant\"][\"w4w8\"]:\n subset[name].packW4W8(scale, zero, scale8)\n else:\n subset[name].pack(scale, zero)\n if qconfig[\"act_quant\"] is not None:\n clamp = subset[name].inp_absmax.max()\n subset[name].amax = clamp\n delattr(subset[name], \"inp_absmax\")\n subset[name].prepare_actfun()\n helpers[name].free()\n set_quant_state(layer, qconfig['act_quant'] != None, qconfig['wt_quant'] != None) \n for j in range(nsamples):\n outs[j] = layer(inps[j].unsqueeze(0), **layer_kwargs)[0]\n layers[i] = layer.cpu()\n del layer\n # del helpers\n torch.cuda.empty_cache()\n\n inps, outs = outs, inps"
},
{
"identifier": "get_loaders",
"path": "dgq/utils/datautils.py",
"snippet": "def get_loaders(name, nsamples=128, seed=0, seqlen=2048, model=''):\n if 'wikitext2' in name:\n return get_wikitext2(nsamples, seed, seqlen, model)\n if 'ptb' in name:\n if 'new' in name:\n return get_ptb_new(nsamples, seed, seqlen, model)\n return get_ptb(nsamples, seed, seqlen, model)\n if 'c4' in name:\n if 'new' in name:\n return get_c4_new(nsamples, seed, seqlen, model)\n return get_c4(nsamples, seed, seqlen, model)"
},
{
"identifier": "prepare_mmlu",
"path": "dgq/utils/datautils.py",
"snippet": "def prepare_mmlu(model, mmlu_dataset):\n from transformers import AutoTokenizer\n try:\n tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)\n except:\n tokenizer = AutoTokenizer.from_pretrained(model, use_fast=True)\n if 'llama' in model.lower():\n tokenizer.eos_token = \"</s>\"\n tokenizer.eos_token_id = 2 # OPT eos-token-id\n\n # add pad token if not present\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.pad_token_id = tokenizer.eos_token_id\n if mmlu_dataset == 'mmlu-zs':\n mmlu_dataset = load_dataset(\"json\", data_files={\n 'eval': 'data/mmlu/zero_shot_mmlu_val.json',\n 'test': 'data/mmlu/zero_shot_mmlu_test.json',\n })\n mmlu_dataset = mmlu_dataset.remove_columns('subject')\n # MMLU Five-shot (Eval/Test only)\n elif mmlu_dataset == 'mmlu' or mmlu_dataset == 'mmlu-fs':\n mmlu_dataset = load_dataset(\"json\", data_files={\n 'eval': 'data/mmlu/five_shot_mmlu_val.json',\n 'test': 'data/mmlu/five_shot_mmlu_test.json',\n })\n # mmlu_dataset = mmlu_dataset[mmlu_split]\n # mmlu_dataset = mmlu_dataset.select(range(nsamples))\n abcd_idx = [\n tokenizer(\"A\", add_special_tokens=False).input_ids[0],\n tokenizer(\"B\", add_special_tokens=False).input_ids[0],\n tokenizer(\"C\", add_special_tokens=False).input_ids[0],\n tokenizer(\"D\", add_special_tokens=False).input_ids[0],\n ]\n data_collator = DataCollatorForCausalLM(\n tokenizer=tokenizer,\n source_max_len=2048,\n target_max_len=512,\n train_on_source=False,\n predict_with_generate=False\n )\n mmlu_dataloader_test = DataLoader(mmlu_dataset['test'],\n collate_fn=data_collator,\n sampler=SequentialSampler(mmlu_dataset['test']),\n batch_size=2)\n return mmlu_dataset['test'], mmlu_dataloader_test, abcd_idx"
},
{
"identifier": "model_eval",
"path": "dgq/utils/evalutils.py",
"snippet": "@torch.no_grad()\ndef model_eval(model, testenc, dev, local_args=None):\n testenc = testenc.input_ids\n nsamples = testenc.numel() // model.seqlen\n # model = model.to(dev)\n model.eval()\n model.config.use_cache = False\n # testenc = testenc.to(dev)\n layers = get_blocks(model)\n layer_kwargs = {}\n cache={'i': 0}\n layers[0] = layers[0].to(dev)\n move_embed(model, dev)\n dtype = next(iter(model.parameters())).dtype\n inps = torch.zeros((nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev)\n torch.cuda.memory_summary()\n class Catcher(nn.Module):\n\n def __init__(self, module):\n super().__init__()\n self.module = module\n\n def forward(self, inp, **kwargs):\n inps[cache['i']] = inp\n cache['i'] += 1\n layer_kwargs.update(kwargs)\n raise ValueError\n \n layers[0] = Catcher(layers[0])\n for i in range(nsamples):\n batch = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)].to(dev)\n try:\n model(batch)\n except ValueError:\n pass\n layers[0] = layers[0].module # restore\n layers[0] = layers[0].cpu()\n move_embed(model, \"cpu\")\n outs = torch.zeros_like(inps)\n torch.cuda.empty_cache()\n for i in range(len(layers)):\n print(i)\n layer = layers[i].to(dev)\n for j in range(nsamples):\n outs[j] = layer(inps[j].unsqueeze(0), **layer_kwargs)[0]\n layers[i] = layer.cpu()\n del layer\n torch.cuda.empty_cache()\n inps, outs = outs, inps\n mod_list = move_norm_head(model, dev)\n testenc = testenc.to(dev)\n nlls = []\n for i in range(nsamples):\n hidden_states = inps[i].unsqueeze(0)\n for mod in mod_list:\n hidden_states = mod(hidden_states)\n lm_logits = model.lm_head(hidden_states)\n shift_logits = lm_logits[:, :-1, :].contiguous()\n shift_labels = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)][:, 1:]\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n neg_log_likelihood = loss.float() * model.seqlen\n nlls.append(neg_log_likelihood)\n ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * model.seqlen))\n print(ppl.item())"
},
{
"identifier": "total_model_eval",
"path": "dgq/utils/evalutils.py",
"snippet": "@torch.no_grad()\ndef total_model_eval(model, testenc, dev, local_args=None):\n # testenc = testenc.cpu()\n testenc = testenc.input_ids\n nsamples = testenc.numel() // model.seqlen\n model = model.to(dev)\n model.eval()\n model.config.use_cache = False\n torch.cuda.memory_summary()\n model = model.to(dev)\n nlls = []\n for i in range(nsamples):\n print(i)\n batch = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)].to(dev)\n out = model(batch)['logits']\n shift_logits = out[:, :-1, :].contiguous()\n shift_labels = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)][:, 1:].cuda()\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)).cpu()\n neg_log_likelihood = loss.float() * model.seqlen\n nlls.append(neg_log_likelihood)\n torch.cuda.empty_cache()\n ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * model.seqlen))\n print(ppl.item())"
},
{
"identifier": "mmlu_eval",
"path": "dgq/utils/evalutils.py",
"snippet": "def mmlu_eval(model, mmlu_dataset, data_loader, abcd_idx, dev, local_args=None):\n abcd_idx = abcd_idx\n model.eval()\n preds, refs = [], []\n loss_mmlu = 0\n cnt = 0 \n for batch in tqdm(data_loader, total=len(data_loader)):\n cnt += 1 \n \n batch = to_device(batch, model.device)\n with torch.no_grad():\n outputs = model(**batch)\n loss = outputs.loss\n logits = outputs.logits\n labels = batch['labels']\n # There are two tokens, the output, and eos token.\n for i, logit in enumerate(logits):\n label_non_zero_id = (batch['labels'][i] != -100).nonzero()[0][0]\n logit_abcd = logit[label_non_zero_id-1][abcd_idx]\n preds.append(torch.argmax(logit_abcd).item())\n labels = labels[labels != IGNORE_INDEX].view(-1, 2)[:,0]\n refs += [abcd_idx.index(label) for label in labels.tolist()]\n loss_mmlu += loss.item()\n # Extract results by subject.\n results = {'mmlu_loss':loss_mmlu/len(data_loader)}\n subject = mmlu_dataset['subject']\n subjects = {s:{'refs':[], 'preds':[]} for s in set(subject)}\n for s,p,r in zip(subject, preds, refs):\n subjects[s]['preds'].append(p)\n subjects[s]['refs'].append(r)\n subject_scores = []\n for subject in subjects:\n nn = len(subjects[subject]['refs'])\n subject_score = 0 if nn==0 else sum([subjects[subject]['refs'][ii] == subjects[subject]['preds'][ii] for ii in range(nn)])/nn\n results[f'accuracy_{subject}'] = subject_score\n subject_scores.append(subject_score)\n results[f'accuracy'] = np.mean(subject_scores)\n return results"
},
{
"identifier": "load_quant",
"path": "dgq/utils/loadutils.py",
"snippet": "def load_quant(model, checkpoint):\n if checkpoint.endswith('.safetensors'):\n from safetensors.torch import load_file as safe_load\n state_dict = model.state_dict()\n ckt = safe_load(checkpoint)\n for key in ckt.keys():\n try:\n state_dict[key].copy_(ckt[key])\n except Exception as e:\n print(key)\n print(e)\n pars = key.split('.')\n att = pars[-1]\n modname = '.'.join(pars[1:-1])\n for name,mod in model.named_modules():\n if modname in name:\n delattr(mod,att)\n mod.register_buffer(att, ckt[key])\n # model.load_state_dict(ckt)\n else:\n model.load_state_dict(torch.load(checkpoint))\n\n for sublayer in model.modules():\n if isinstance(sublayer,QuantLinear):\n sublayer.prepare_actfun() \n delattr(sublayer, \"weight\")\n\n\n model.seqlen = 2048\n print('Done.')\n return model"
},
{
"identifier": "inference_model",
"path": "dgq/utils/loadutils.py",
"snippet": "def inference_model(model):\n if isinstance(model, OPTForCausalLM):\n decoder_layer_scales = []\n for layer in model.model.decoder.layers:\n decoder_layer_scale = {\"attn_input_scale\": layer.self_attn.q_proj.amax.float() / (2 ** 7 - 1),\n \"q_output_scale\": layer.self_attn.q_quant.scale.float(),\n \"k_output_scale\": layer.self_attn.k_quant.scale.float(),\n \"v_output_scale\": layer.self_attn.v_quant.scale.float(),\n \"out_input_scale\": layer.self_attn.out_proj.amax.float() / (2 ** 7 - 1),\n \"fc1_input_scale\": layer.fc1.amax.float() / (2 ** 7 - 1),\n \"fc2_input_scale\": layer.fc2.amax.float() / (2 ** 7 - 1)}\n decoder_layer_scales.append(decoder_layer_scale)\n seqlen = model.seqlen\n model = A8W4OPTForCausalLM.from_float(model, decoder_layer_scales)\n model.seqlen = seqlen\n elif isinstance(model, LlamaForCausalLM):\n decoder_layer_scales = []\n for layer in model.model.layers:\n decoder_layer_scale = {\"attn_input_scale\": layer.self_attn.q_proj.amax.float() / (2 ** 7 - 1),\n \"q_output_scale\": layer.self_attn.q_quant.scale.float(),\n \"k_output_scale\": layer.self_attn.k_quant.scale.float(),\n \"v_output_scale\": layer.self_attn.v_quant.scale.float(),\n \"out_input_scale\": layer.self_attn.o_proj.amax.float() / (2 ** 7 - 1),\n \"mlp_input_scale\": layer.mlp.up_proj.amax.float() / (2 ** 7 - 1),\n \"down_input_scale\": layer.mlp.down_proj.amax.float() / (2 ** 7 - 1)}\n decoder_layer_scales.append(decoder_layer_scale)\n seqlen = model.seqlen\n model = A8W4LlamaForCausalLM.from_float(model, decoder_layer_scales)\n model.seqlen = seqlen\n else:\n raise NotImplementedError\n return model"
},
{
"identifier": "convert_model",
"path": "dgq/utils/modelutils.py",
"snippet": "def convert_model(module, qconfig):\n if isinstance(module, QuantLinear):\n return\n for name, mod in module.named_children():\n if isinstance(mod, nn.Linear) and not name.endswith(\"head\"):\n newlayer = QuantLinear(mod.in_features, mod.out_features, hasattr(mod, \"bias\"), qconfig)\n newlayer.weight = mod.weight\n if hasattr(mod, \"bias\"):\n newlayer.bias = mod.bias\n setattr(module, name, newlayer)\n elif isinstance(mod, OPTAttention):\n OPTAttention_QKVQuant(mod, qconfig)\n elif isinstance(mod, BloomAttention):\n BLOOMAttention_QKVQuant(mod, qconfig)\n elif isinstance(mod, LlamaAttention):\n LlamaAttention_QKVQuant(mod, qconfig)\n convert_model(mod, qconfig)"
}
] | import argparse
import numpy as np
import torch
import torch.nn as nn
import time
import lm_eval
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
from texttable import Texttable
from dgq.quant.quant_sequence import PTQ
from dgq.utils.datautils import get_loaders, prepare_mmlu
from dgq.utils.evalutils import model_eval, total_model_eval, mmlu_eval
from dgq.utils.loadutils import load_quant, inference_model
from dgq.utils.modelutils import convert_model
from safetensors.torch import save_file as safe_save | 5,657 |
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str, help='llama model to load')
parser.add_argument('dataset', type=str, choices=['wikitext2', 'ptb', 'c4'], help='Where to extract calibration data from.')
parser.add_argument('--nsamples', type=int, default=18, help='Number of calibration data samples.')
parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
parser.add_argument('--wbits', type=int, default=4, choices=[2, 3, 4, 8, 16], help='#bits to use for weight quantization; use 16 for evaluating base model.')
parser.add_argument('--abits', type=int, default=8, choices=[8, 16], help='#bits to use for activation quantization; use 16 for evaluating base model.')
parser.add_argument('--percdamp', type=float, default=.01, help='Percent of the average Hessian diagonal to use for dampening.')
parser.add_argument('--save', type=str, default='', help='Save quantized checkpoint under this name.')
parser.add_argument('--save_safetensors', type=str, default='', help='Save quantized `.safetensors` checkpoint under this name.')
parser.add_argument('--load', type=str, default='', help='Load quantized model.')
parser.add_argument('--benchmark', type=int, default=0, help='Number of tokens to use for benchmarking.')
parser.add_argument('--check', action='store_true', help='Whether to compute perplexity during benchmarking for verification.')
parser.add_argument('--groupsize', type=int, default=-1, help='Groupsize to use for quantization; default uses full row.')
parser.add_argument('--sym', action='store_true', help='Whether to perform symmetric quantization.')
parser.add_argument('--act-order', action='store_true', help='Whether to apply the activation order GPTQ heuristic')
parser.add_argument('--true-sequential', action='store_true', help='Whether to run in true sequential model.')
parser.add_argument('--act_fun', type=str, default='static', help='activation quantization.')
parser.add_argument('--wt_fun', type=str, default='naive', help='weight quantization.')
parser.add_argument('--smoothquant', action='store_true', help='whether to ')
parser.add_argument('--kvquant', action='store_true', help='whether to ')
parser.add_argument('--meanact', action='store_true', help='whether to ')
parser.add_argument('--observe', action='store_true', help='whether to ')
parser.add_argument('--nearest', action='store_true', help='whether to ')
parser.add_argument('--w4w8', action='store_true', help='wheter to open dual grained quantization')
parser.add_argument('--eval', action='store_true', help='evaluate quantized model.')
parser.add_argument('--mmlu_eval', type=str, default="no", help="mmlu evaluation.")
parser.add_argument('--csqa_eval', type=str, default="no", help="csqa evaluation.")
parser.add_argument('--inference_mod', action='store_true', help='whether to ')
args = parser.parse_args()
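# Map the parsed CLI flags onto the nested quantization config (act_quant / wt_quant / smoothquant / meanact / kvquant) consumed below.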
def generate_qconfig(args):
qconfig = {}
if args.act_fun == "no":
qconfig["act_quant"] = None
else:
act_qconfig = {}
act_qconfig["bits"] = args.abits
act_qconfig["method"] = args.act_fun
qconfig["act_quant"] = act_qconfig
if args.wt_fun == "no":
qconfig["wt_quant"] = None
else:
wt_qconfig = {}
wt_qconfig["bits"] = args.wbits
wt_qconfig["method"] = args.wt_fun
wt_qconfig["groupsize"] = args.groupsize
wt_qconfig["w4w8"] = hasattr(args, "w4w8") and args.w4w8
qconfig["wt_quant"] = wt_qconfig
qconfig["smoothquant"] = hasattr(args, "smoothquant") and args.smoothquant
qconfig["meanact"] = hasattr(args, "meanact") and args.meanact
qconfig["kvquant"] = hasattr(args, "kvquant") and args.kvquant
return qconfig
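# Load the HF causal LM in bfloat16; the weight-init functions are stubbed out so loading skips redundant random initialization.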
def prepare_model(model, seqlen=2048):
def skip(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = skip
torch.nn.init.uniform_ = skip
torch.nn.init.normal_ = skip
model = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.bfloat16)
model.seqlen = seqlen
return model
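# End-to-end flow: build the model and qconfig, either load an existing quantized checkpoint or run PTQ on calibration data, then optionally save and evaluate.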
def main():
model = prepare_model(args.model)
qconfig = generate_qconfig(args)
convert_model(model, qconfig)
print(args)
enc, _ = get_loaders(args.dataset, args.nsamples, model=args.model)
if args.load:
load_quant(model, args.load)
        if args.inference_mod:
model = inference_model(model)
else:
tick = time.time()
PTQ(model, enc, qconfig, args.nsamples)
print(time.time() - tick)
if args.save_safetensors:
model = model.cpu()
state_dict = model.state_dict()
state_dict = {k: v.clone().contiguous() for k, v in state_dict.items()}
safe_save(state_dict, args.save_safetensors)
if args.save:
model = model.cpu()
torch.save(model.state_dict(), args.save)
if args.eval:
datasets = ['wikitext2', 'ptb', 'c4']
for dataset in datasets:
_, testloader = get_loaders(dataset, seed=args.seed, model=args.model, seqlen=model.seqlen)
print(dataset)
# model_eval(model, testloader, torch.device('cuda:0'), local_args=args)
total_model_eval(model, testloader, torch.device('cuda:0'), local_args=args)
|
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str, help='llama model to load')
parser.add_argument('dataset', type=str, choices=['wikitext2', 'ptb', 'c4'], help='Where to extract calibration data from.')
parser.add_argument('--nsamples', type=int, default=18, help='Number of calibration data samples.')
parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
parser.add_argument('--wbits', type=int, default=4, choices=[2, 3, 4, 8, 16], help='#bits to use for weight quantization; use 16 for evaluating base model.')
parser.add_argument('--abits', type=int, default=8, choices=[8, 16], help='#bits to use for activation quantization; use 16 for evaluating base model.')
parser.add_argument('--percdamp', type=float, default=.01, help='Percent of the average Hessian diagonal to use for dampening.')
parser.add_argument('--save', type=str, default='', help='Save quantized checkpoint under this name.')
parser.add_argument('--save_safetensors', type=str, default='', help='Save quantized `.safetensors` checkpoint under this name.')
parser.add_argument('--load', type=str, default='', help='Load quantized model.')
parser.add_argument('--benchmark', type=int, default=0, help='Number of tokens to use for benchmarking.')
parser.add_argument('--check', action='store_true', help='Whether to compute perplexity during benchmarking for verification.')
parser.add_argument('--groupsize', type=int, default=-1, help='Groupsize to use for quantization; default uses full row.')
parser.add_argument('--sym', action='store_true', help='Whether to perform symmetric quantization.')
parser.add_argument('--act-order', action='store_true', help='Whether to apply the activation order GPTQ heuristic')
parser.add_argument('--true-sequential', action='store_true', help='Whether to run in true sequential mode.')
parser.add_argument('--act_fun', type=str, default='static', help='activation quantization method.')
parser.add_argument('--wt_fun', type=str, default='naive', help='weight quantization method.')
parser.add_argument('--smoothquant', action='store_true', help='whether to ')
parser.add_argument('--kvquant', action='store_true', help='whether to ')
parser.add_argument('--meanact', action='store_true', help='whether to ')
parser.add_argument('--observe', action='store_true', help='whether to ')
parser.add_argument('--nearest', action='store_true', help='whether to ')
parser.add_argument('--w4w8', action='store_true', help='whether to enable dual-grained quantization.')
parser.add_argument('--eval', action='store_true', help='evaluate quantized model.')
parser.add_argument('--mmlu_eval', type=str, default="no", help="mmlu evaluation.")
parser.add_argument('--csqa_eval', type=str, default="no", help="csqa evaluation.")
parser.add_argument('--inference_mod', action='store_true', help='whether to ')
args = parser.parse_args()
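# Map the parsed CLI flags onto the nested quantization config (act_quant / wt_quant / smoothquant / meanact / kvquant) consumed below.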
def generate_qconfig(args):
qconfig = {}
if args.act_fun == "no":
qconfig["act_quant"] = None
else:
act_qconfig = {}
act_qconfig["bits"] = args.abits
act_qconfig["method"] = args.act_fun
qconfig["act_quant"] = act_qconfig
if args.wt_fun == "no":
qconfig["wt_quant"] = None
else:
wt_qconfig = {}
wt_qconfig["bits"] = args.wbits
wt_qconfig["method"] = args.wt_fun
wt_qconfig["groupsize"] = args.groupsize
wt_qconfig["w4w8"] = hasattr(args, "w4w8") and args.w4w8
qconfig["wt_quant"] = wt_qconfig
qconfig["smoothquant"] = hasattr(args, "smoothquant") and args.smoothquant
qconfig["meanact"] = hasattr(args, "meanact") and args.meanact
qconfig["kvquant"] = hasattr(args, "kvquant") and args.kvquant
return qconfig
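# Load the HF causal LM in bfloat16; the weight-init functions are stubbed out so loading skips redundant random initialization.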
def prepare_model(model, seqlen=2048):
def skip(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = skip
torch.nn.init.uniform_ = skip
torch.nn.init.normal_ = skip
model = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.bfloat16)
model.seqlen = seqlen
return model
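# End-to-end flow: build the model and qconfig, either load an existing quantized checkpoint or run PTQ on calibration data, then optionally save and evaluate.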
def main():
model = prepare_model(args.model)
qconfig = generate_qconfig(args)
convert_model(model, qconfig)
print(args)
enc, _ = get_loaders(args.dataset, args.nsamples, model=args.model)
if args.load:
load_quant(model, args.load)
        if args.inference_mod:
model = inference_model(model)
else:
tick = time.time()
PTQ(model, enc, qconfig, args.nsamples)
print(time.time() - tick)
if args.save_safetensors:
model = model.cpu()
state_dict = model.state_dict()
state_dict = {k: v.clone().contiguous() for k, v in state_dict.items()}
safe_save(state_dict, args.save_safetensors)
if args.save:
model = model.cpu()
torch.save(model.state_dict(), args.save)
if args.eval:
datasets = ['wikitext2', 'ptb', 'c4']
for dataset in datasets:
_, testloader = get_loaders(dataset, seed=args.seed, model=args.model, seqlen=model.seqlen)
print(dataset)
# model_eval(model, testloader, torch.device('cuda:0'), local_args=args)
total_model_eval(model, testloader, torch.device('cuda:0'), local_args=args) | if args.mmlu_eval != 'no': | 5 | 2023-11-01 13:45:16+00:00 | 8k |
noco-ai/elemental-golem | modules/turboderp/exllama/golem-generator.py | [
{
"identifier": "LlmHandler",
"path": "application/llm_handler.py",
"snippet": "class LlmHandler(BaseHandler):\n \n def __init__(self):\n super().__init__()\n\n def load(self, model, model_options, local_path):\n pass\n \n def load_config_settings(self, num_input_tokens, request):\n config = self.model_config\n max_new_tokens_config = int(request.get(\"max_new_tokens\", 1024))\n max_seq_len = config.get(\"max_seq_len\", 2048)\n max_new_tokens = min(max_new_tokens_config, max_seq_len - num_input_tokens)\n top_p = request.get(\"top_p\", 0.9)\n top_k = request.get(\"top_k\", 50)\n seed = request.get(\"seed\", -1)\n min_p = request.get(\"min_p\", 0.05)\n mirostat = request.get(\"mirostat\", 0)\n mirostat_eta = request.get(\"mirostat_eta\", 0.01)\n mirostat_tau = request.get(\"mirostat_tau\", 5)\n temperature = request.get(\"temperature\", 1)\n stream_output = True if \"stream\" in request and request[\"stream\"] == True else False\n debug = \"debug\" in request\n stop_key = request.get(\"stop_key\", \"<stop>\") \n\n logger.info(f\"prompt tokens: {num_input_tokens}, max completion tokens: {max_new_tokens}, context length: {max_seq_len}\")\n logger.info(f\"temperature: {temperature}, top_p: {top_p}, top_k: {top_k}, seed: {seed}, stream output: {stream_output}\")\n logger.info(f\"min_p: {min_p}, mirostat: {mirostat}, mirostat_eta: {mirostat_eta}, mirostat_tau: {mirostat_tau}\")\n return max_new_tokens, top_p, top_k, seed, temperature, stream_output, debug, stop_key, min_p, mirostat, mirostat_eta, mirostat_tau \n\n def build_stop_conditions(self, stops, to_lower = True):\n check_stop_token = False\n stop_conditions = []\n for stop_text in stops: \n if stop_text == \"<stop>\":\n check_stop_token = True\n continue\n add_condition = stop_text.lower() if to_lower == True else stop_text\n stop_conditions.append(add_condition)\n \n return check_stop_token, stop_conditions\n\n def check_stop_conditions(self, token, res_line, eos_token, check_stop_token, stop_conditions):\n if check_stop_token and token == eos_token:\n return True\n\n for stop_string in stop_conditions:\n if res_line.lower().endswith(stop_string):\n return True\n \n return False\n \n def finish_response(self, stop_key, response, request, stream_output,\n finish_reason, tokens_per_second, new_tokens, input_tokens, model, elapsed, debug):\n if debug and stream_output == False:\n print('\\033[92m' + response + '\\033[0m')\n\n send_content = \"\"\n if stream_output:\n send_content = stop_key \n elif \"start_response\" in request:\n send_content = f\"{request['start_response']}{response}\"\n else:\n send_content = response\n\n llm_response = {\"content\": send_content, \"finish_reason\": finish_reason, \n \"tokens_per_second\": round(tokens_per_second, 2), \"completion_tokens\": new_tokens, \"prompt_tokens\": input_tokens, \"model\": model }\n \n if debug:\n print(llm_response)\n\n logger.info(f\"prompt processed in {elapsed:.2f} seconds, new tokens: {new_tokens}, tokens/second: {tokens_per_second:.2f}\") \n return llm_response\n \n def get_token_count(self, input_text):\n return 100000\n \n def _get_system_prompt(self, request, config):\n system_prompt = \"\"\n in_request = False\n contains_user_message = False\n\n if \"system_message\" in config and len(config[\"system_message\"]):\n system_prompt = config['system_message'] \n\n # override with system prompt provided by request\n messages_len = len(request[\"messages\"])\n if messages_len and request[\"messages\"][0][\"role\"] == \"system\":\n system_prompt = request['messages'][0]['content'] \n in_request = True\n\n if \"system_prompt_format\" in config: \n 
template = config[\"system_prompt_format\"]\n ai_role = request[\"ai_role\"] if \"ai_role\" in request else config[\"ai_role\"]\n user_role = request[\"user_role\"] if \"user_role\" in request else config[\"user_role\"] \n if \"{prompt}\" in template: \n check_index = 1 if in_request else 0\n check_len = 2 if in_request else 1\n prompt = request[\"messages\"][check_index][\"content\"] if messages_len >= check_len and request[\"messages\"][check_index][\"role\"] == \"user\" else \"\"\n response = request[\"messages\"][check_index + 1][\"content\"] if check_index + 1 < messages_len and request[\"messages\"][check_index + 1][\"role\"] == \"assistant\" else \"\"\n system_prompt = template.format(user_role=user_role, system_prompt=system_prompt.strip(), ai_role=ai_role, prompt=prompt, response=response) + \"\\n\"\n contains_user_message = True\n else:\n system_prompt = template.format(user_role=user_role, system_prompt=system_prompt.strip(), ai_role=ai_role)\n \n return system_prompt, in_request, contains_user_message\n \n def _prep_prompt(self, request, config):\n request_system_message = None\n max_new_tokens = request.get(\"max_new_tokens\", 1024)\n max_seq_length = config[\"max_seq_len\"] \n max_input_tokens = max(max_seq_length - max_new_tokens, 0) \n\n if max_input_tokens == 0:\n logger.error(\"error with configuration of models context limits\")\n raise ValueError('error with configuration of models context limits')\n \n # give a little wiggle room for the way the prompt is built\n max_input_tokens -= 64\n \n system_prompt, sys_prompt_in_request, clip_first_user_message = self._get_system_prompt(request, config)\n system_prompt_tokens = self.get_token_count(system_prompt)\n if system_prompt_tokens >= max_input_tokens:\n logger.error(\"system prompt excceds max input tokens\")\n raise ValueError(\"system prompt excceds max input tokens\")\n \n if sys_prompt_in_request:\n request_system_message = request[\"messages\"][0]\n request[\"messages\"].pop(0) \n\n if clip_first_user_message:\n request[\"messages\"].pop(0)\n\n # clip all but last message if this is an instruct model\n if len(request[\"messages\"]) == 0:\n messages = []\n if \"model_type\" in config and config[\"model_type\"] == \"instruct\":\n messages = [request[\"messages\"][-1]]\n else:\n messages = request[\"messages\"][::-1] \n\n return messages, system_prompt_tokens, request_system_message, system_prompt, sys_prompt_in_request, max_input_tokens \n\n def build_prompt(self, request, config, model):\n prompt = \"\"\n \n # raw prompt\n if \"raw\" in request:\n prompt = request[\"raw\"]\n if \"start_response\" in request:\n prompt += request[\"start_response\"] \n return prompt\n\n messages, system_prompt_tokens, request_system_message, system_prompt, sys_prompt_in_request, max_input_tokens = self._prep_prompt(request, config) \n max_input_tokens -= 64\n\n # get delimiter in-between user and prompt and get roles \n ai_role = request[\"ai_role\"] if \"ai_role\" in request else config[\"ai_role\"]\n user_role = request[\"user_role\"] if \"user_role\" in request else config[\"user_role\"]\n template = config[\"prompt_format\"]\n\n prompt_parts = [] \n input_token_count = system_prompt_tokens\n\n for index, message in enumerate(messages):\n \n if message[\"role\"] == \"assistant\":\n continue \n \n ai_response = \"\" if index == 0 else messages[index - 1][\"content\"].strip()\n formatted_string = template.format(user_role=user_role, prompt=message['content'].strip(), ai_role=ai_role, response=ai_response) \n token_count = 
self.get_token_count(formatted_string) \n if input_token_count + token_count > max_input_tokens:\n break\n\n input_token_count += token_count\n prompt_parts.append(formatted_string) \n\n prompt_parts = prompt_parts[::-1] \n prompt = system_prompt + \"\\n\".join(prompt_parts) \n if \"start_response\" in request:\n prompt += request[\"start_response\"]\n\n return prompt"
},
{
"identifier": "get_gpu_memory_usage",
"path": "application/system_info.py",
"snippet": "def get_gpu_memory_usage(device_id):\n nvmlInit()\n\n try:\n device_handle = nvmlDeviceGetHandleByIndex(device_id)\n memory_info = nvmlDeviceGetMemoryInfo(device_handle)\n used_memory = memory_info.used / (1024 * 1024) # Convert to MB\n total_memory = memory_info.total / (1024 * 1024) # Convert to MB\n free_memory = memory_info.free / (1024 * 1024) # Convert to MB\n except NVMLError as error:\n logger.info(f\"Failed to get GPU memory usage: {error}\")\n used_memory = -1\n finally:\n nvmlShutdown()\n\n return used_memory, free_memory, total_memory"
}
] | from application.llm_handler import LlmHandler
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
from lora import ExLlamaLora
from application.system_info import get_gpu_memory_usage
from huggingface_hub import snapshot_download
import sys
import os
import glob
import time
import logging
import math | 4,029 | if unicode_hold is True:
unicode_hold = False
chunk = res_line[-1:]
# send chunk to front end
if stream_output:
if debug:
print('\033[96m' + chunk, end="")
channel.basic_publish(
exchange=incoming_headers['return_exchange'],
routing_key=incoming_headers['return_routing_key'],
body=chunk, properties=outgoing_properties)
else:
response += chunk
prompt += chunk
held_text = ""
else:
held_text += new_text
# check stop conditions
stop_condition = self.check_stop_conditions(token, res_line, tokenizer.eos_token_id,
check_stop_token, stop_conditions)
if stop_condition: break
end_time = time.time()
elapsed = end_time - begin_time
token_rate = 0 if elapsed == 0 else (new_tokens / elapsed)
generator.end_beam_search()
if debug and stream_output:
print('\033[0m' + "")
if new_tokens == max_new_tokens:
finish_reason = "length"
model_name = incoming_headers["model_name"] if "model_name" in incoming_headers else "not_provided"
resp = self.finish_response(stop_key, response, request, stream_output, finish_reason,
token_rate, new_tokens, input_token_count, model_name, elapsed, debug)
return resp
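    # Resolve the requested LoRA (request overrides the config default), downloading and caching the adapter on first use.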
def load_lora(self, request, model, config):
# load lora from config and override w/ request if present
lora_name = config["default_lora"] if "default_lora" in config else None
if "lora" in request:
lora_name = request["lora"]
if lora_name != None:
if lora_name not in self.loras:
logger.info(f"loading lora {lora_name}")
lora_dir = os.path.join(f"data/loras/", lora_name)
if not os.path.exists(lora_dir):
logger.info("downloading lora {lora_name} from huggingface")
snapshot_download(repo_id=lora_name, local_dir=lora_dir, cache_dir='data/cache', local_dir_use_symlinks=False)
lora_path = os.path.join(f"data/loras/", lora_name, "adapter_model.bin")
lora_config_path = os.path.join(f"data/loras/{lora_name}", "adapter_config.json")
lora = ExLlamaLora(model["model_loaded"], lora_config_path, lora_path)
self.loras[lora_name] = lora
else:
logger.info(f"using lora {lora_name}")
model["generator"].lora = self.loras[lora_name]
else:
model["generator"].lora = None
def execute(self, model, request):
# load lora
config = self.model_config
self.load_lora(request, model, config)
# build prompt
prompt = self.build_prompt(request, config, model)
# copy amqp headers
incoming_headers = model["amqp_headers"]
outgoing_properties = self.copy_queue_headers(incoming_headers)
stream_resp = self.stream(
model["generator"],
model["tokenizer"],
model["model_loaded"],
prompt,
model["amqp_channel"],
incoming_headers,
outgoing_properties,
config["stop_on"],
model,
request)
return stream_resp
def load(self, model, model_options, local_path):
self.model_config = model["configuration"]
# get paths
logger.info(f"starting module {local_path}")
tokenizer_path = os.path.join(local_path, "tokenizer.model")
model_config_path = os.path.join(local_path, "config.json")
st_pattern = os.path.join(local_path, "*.safetensors")
model_path = glob.glob(st_pattern)[0]
# Create config, model, tokenizer and generator
config = ExLlamaConfig(model_config_path)
config.model_path = model_path
config.compress_pos_emb = model["configuration"].get("compress_pos_emb", 1.0)
config.max_seq_len = model["configuration"].get("max_seq_len", 2048)
config.matmul_recons_thd = 8
config.fused_mlp_thd = 2
config.sdp_thd = 8
# set model device
if model_options["device"].startswith("split"):
device_map = model_options["device"].split(':')[1]
config.set_auto_map(device_map)
elif model_options["device"].startswith("cuda"):
device_number = int(model_options["device"].split(':')[1])
device_array = [0]*12
| sys.path.append(os.path.dirname(os.path.realpath(__file__)))
logger = logging.getLogger(__name__)
class GolemExLlamaGenerator(LlmHandler):
def __init__(self):
super().__init__()
self.loras = {}
def update_config(self, config_data):
current_config = self.model_config
merged_config = {**current_config, **config_data}
self.model_config = merged_config
def validate(self, request):
is_valid, errors = self.validate_request(request, 'llm')
return is_valid, errors
def get_token_count(self, input_text):
ids = self.generator.tokenizer.encode(input_text)
input_token_count = len(ids[0])
return input_token_count
def stream(self, generator, tokenizer, model, prompt, channel, incoming_headers,
outgoing_properties, stops, model_data, request):
# setup stop conditions
check_stop_token, stop_conditions = self.build_stop_conditions(stops)
res_line = ""
held_text = ""
response = ""
unicode_hold = False
finish_reason = "stop"
stop_condition = False
new_tokens = 0
stop_generation_counter = 0
ids = generator.tokenizer.encode(prompt)
input_token_count = len(ids[0])
max_new_tokens, top_p, top_k, seed, temperature, stream_output, debug, stop_key, \
min_p, mirostat, mirostat_eta, mirostat_tau = self.load_config_settings(input_token_count, request)
if debug:
print('\033[94m')
print(request)
print(prompt)
print('\033[0m')
socket_id = incoming_headers["socket_id"] if "socket_id" in incoming_headers else None
generator.settings.temperature = temperature
generator.settings.top_p = top_p
begin_time = time.time()
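        # Prime the generator with the prompt tokens, then decode token-by-token with beam search.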
generator.gen_begin(ids)
generator.begin_beam_search()
for i in range(max_new_tokens):
new_tokens += 1
# check if stop generation was requested
stop_generation, stop_generation_counter = self.check_stop_generation(stop_generation_counter,
model_data["stop_generation_event"], model_data["stop_generation_filter"], socket_id)
if stop_generation:
finish_reason = "abort"
break
token = generator.beam_search()
prev_res_line = res_line
res_line = tokenizer.decode(generator.sequence_actual[0, -new_tokens:])
new_text = res_line[len(prev_res_line):]
# new text
chunk = held_text + new_text
# check if we should hold off on streaming this text
hold_text = False
for stop_string in stop_conditions:
if stop_string.startswith(chunk.lower()): hold_text = True
if len(res_line):
check_ord = ord(res_line[-1])
if check_ord == 65533 or check_ord == 55356 or check_ord == 55357:
hold_text = True
unicode_hold = True
if not hold_text:
if unicode_hold is True:
unicode_hold = False
chunk = res_line[-1:]
# send chunk to front end
if stream_output:
if debug:
print('\033[96m' + chunk, end="")
channel.basic_publish(
exchange=incoming_headers['return_exchange'],
routing_key=incoming_headers['return_routing_key'],
body=chunk, properties=outgoing_properties)
else:
response += chunk
prompt += chunk
held_text = ""
else:
held_text += new_text
# check stop conditions
stop_condition = self.check_stop_conditions(token, res_line, tokenizer.eos_token_id,
check_stop_token, stop_conditions)
if stop_condition: break
end_time = time.time()
elapsed = end_time - begin_time
token_rate = 0 if elapsed == 0 else (new_tokens / elapsed)
generator.end_beam_search()
if debug and stream_output:
print('\033[0m' + "")
if new_tokens == max_new_tokens:
finish_reason = "length"
model_name = incoming_headers["model_name"] if "model_name" in incoming_headers else "not_provided"
resp = self.finish_response(stop_key, response, request, stream_output, finish_reason,
token_rate, new_tokens, input_token_count, model_name, elapsed, debug)
return resp
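    # Resolve the requested LoRA (request overrides the config default), downloading and caching the adapter on first use.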
def load_lora(self, request, model, config):
# load lora from config and override w/ request if present
lora_name = config["default_lora"] if "default_lora" in config else None
if "lora" in request:
lora_name = request["lora"]
if lora_name != None:
if lora_name not in self.loras:
logger.info(f"loading lora {lora_name}")
lora_dir = os.path.join(f"data/loras/", lora_name)
if not os.path.exists(lora_dir):
logger.info("downloading lora {lora_name} from huggingface")
snapshot_download(repo_id=lora_name, local_dir=lora_dir, cache_dir='data/cache', local_dir_use_symlinks=False)
lora_path = os.path.join(f"data/loras/", lora_name, "adapter_model.bin")
lora_config_path = os.path.join(f"data/loras/{lora_name}", "adapter_config.json")
lora = ExLlamaLora(model["model_loaded"], lora_config_path, lora_path)
self.loras[lora_name] = lora
else:
logger.info(f"using lora {lora_name}")
model["generator"].lora = self.loras[lora_name]
else:
model["generator"].lora = None
def execute(self, model, request):
# load lora
config = self.model_config
self.load_lora(request, model, config)
# build prompt
prompt = self.build_prompt(request, config, model)
# copy amqp headers
incoming_headers = model["amqp_headers"]
outgoing_properties = self.copy_queue_headers(incoming_headers)
stream_resp = self.stream(
model["generator"],
model["tokenizer"],
model["model_loaded"],
prompt,
model["amqp_channel"],
incoming_headers,
outgoing_properties,
config["stop_on"],
model,
request)
return stream_resp
def load(self, model, model_options, local_path):
self.model_config = model["configuration"]
# get paths
logger.info(f"starting module {local_path}")
tokenizer_path = os.path.join(local_path, "tokenizer.model")
model_config_path = os.path.join(local_path, "config.json")
st_pattern = os.path.join(local_path, "*.safetensors")
model_path = glob.glob(st_pattern)[0]
# Create config, model, tokenizer and generator
config = ExLlamaConfig(model_config_path)
config.model_path = model_path
config.compress_pos_emb = model["configuration"].get("compress_pos_emb", 1.0)
config.max_seq_len = model["configuration"].get("max_seq_len", 2048)
config.matmul_recons_thd = 8
config.fused_mlp_thd = 2
config.sdp_thd = 8
# set model device
if model_options["device"].startswith("split"):
device_map = model_options["device"].split(':')[1]
config.set_auto_map(device_map)
elif model_options["device"].startswith("cuda"):
device_number = int(model_options["device"].split(':')[1])
device_array = [0]*12 | used_memory, free_memory, total_memory = get_gpu_memory_usage(device_number) | 1 | 2023-11-06 19:03:07+00:00 | 8k |
m4rkw/monzo-utils | monzo_utils/model/flex_summary.py | [
{
"identifier": "Payment",
"path": "monzo_utils/model/payment.py",
"snippet": "class Payment:\n\n transaction_type = 'money_out'\n always_fixed = False\n\n def __init__(self, config, payment_list_config, payment_config, last_salary_date, next_salary_date, following_salary_date):\n self.config = config\n self.payment_list_config = payment_list_config\n self.payment_config = payment_config\n self.last_salary_date = last_salary_date\n self.next_salary_date = next_salary_date\n self.following_salary_date = following_salary_date\n\n self.today = datetime.datetime.now()\n\n self.cache = {}\n\n\n def data(self, abbreviate=False):\n if self.num_paid is not None:\n suffix = '%d/%d' % (\n self.num_paid,\n self.num_total\n )\n else:\n suffix = ''\n\n if self.remaining is not None:\n remaining = self.remaining\n else:\n remaining = None\n\n return {\n 'status': self.status,\n 'payment_type': self.payment_type if abbreviate is False else self.abbreviate(self.payment_type),\n 'name': self.name,\n 'suffix': suffix,\n 'amount': self.display_amount,\n 'remaining': remaining,\n 'last_date': self.short_date(self.last_date) if abbreviate else self.last_date,\n 'due_date': self.short_date(self.due_date) if abbreviate else self.due_date\n }\n\n\n def abbreviate(self, string):\n abbreviated = ''\n\n for i in range(0, len(string)):\n if string[i].isupper():\n abbreviated += string[i]\n\n return abbreviated\n\n\n def short_date(self, date):\n if not date:\n return None\n\n return date.strftime('%d/%m/%y')\n\n\n def display(self):\n data = self.data()\n\n print(\"%s: %s %s %s %s %s %s %s\" % (\n data['status'].rjust(7),\n data['payment_type'].ljust(15),\n data['name'].ljust(25),\n data['suffix'].ljust(5),\n ('£%.2f' % (data['amount'])).ljust(8),\n ('£%.2f' % (data['remaining'])).ljust(8) if data['remaining'] else ''.ljust(8),\n data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),\n data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''\n ))\n\n\n @property\n def name(self):\n return self.payment_config['name']\n\n\n @property\n def status(self):\n if 'start_date' in self.payment_config and self.payment_config['start_date'] >= self.next_salary_date:\n return 'SKIPPED'\n\n if 'yearly_month' in self.payment_config:\n if self.yearly_payment_due_this_month(self.payment_config, self.last_salary_date) is False:\n return 'SKIPPED'\n\n if 'renew_date' in self.payment_config and self.payment_config['renew_date'] >= self.next_salary_date:\n return 'SKIPPED'\n\n if 'exclude_months' in self.payment_config and self.today.month in self.payment_config['exclude_months']:\n return 'SKIPPED'\n\n if self.last_date and self.last_date >= self.last_salary_date:\n return 'PAID'\n\n if self.due_date and self.due_date >= self.next_salary_date:\n return 'SKIPPED'\n\n return 'DUE'\n\n\n @property\n def payment_type(self):\n return re.sub(r'(?<!^)(?=[A-Z])', '_', type(self).__name__).replace('_',' ')\n\n\n @property\n def num_paid(self):\n return None\n\n\n @property\n def num_total(self):\n if 'months' in self.payment_config:\n return self.payment_config['months']\n\n return None\n\n\n @property\n def remaining(self):\n pass\n\n\n @property\n def display_amount(self):\n today = datetime.datetime.now()\n today = datetime.date(today.year, today.month, today.day)\n\n if 'last_amount_overrides' in Config().keys and \\\n self.payment_config['name'] in Config().last_amount_overrides and \\\n self.last_salary_amount in Config().last_amount_overrides[self.payment_config['name']]:\n\n amount = 
Config().last_amount_overrides[self.payment_config['name']][self.last_salary_amount]\n elif 'renewal' in self.payment_config and (today >= self.payment_config['renewal']['date'] or self.status == 'PAID'):\n if 'first_payment' in self.payment_config['renewal'] and today <= self.payment_config['renewal']['date']:\n amount = self.payment_config['renewal']['first_payment']\n else:\n if self.last_date >= self.payment_config['renewal']['date']:\n amount = float(getattr(self.last_payment, self.transaction_type))\n else:\n amount = self.payment_config['renewal']['amount']\n\n elif self.last_payment:\n amount = float(getattr(self.last_payment, self.transaction_type))\n else:\n amount = self.payment_config['amount']\n\n if self.transaction_type == 'money_in':\n return 0 - amount\n\n return amount\n\n\n @property\n def last_date(self):\n if 'last_date' in self.cache:\n return self.cache['last_date']\n\n if 'last_date_overrides' in self.config and \\\n self.payment_config['name'] in self.config['last_date_overrides'] and \\\n self.last_salary_date in self.config['last_date_overrides'][self.payment_config['name']]:\n\n self.cache['last_date'] = self.config['last_date_overrides'][self.payment_config['name']][self.last_salary_date]\n\n return self.cache['last_date']\n\n if 'desc' not in self.payment_config:\n self.cache['last_date'] = None\n\n return self.cache['last_date']\n\n if self.last_payment:\n self.cache['last_date'] = self.last_payment.date\n else:\n if self.older_last_payment is not None:\n self.cache['last_date'] = self.older_last_payment.date\n else:\n self.cache['last_date'] = None\n\n return self.cache['last_date']\n\n\n @property\n def last_payment(self):\n if 'last_payment' in self.cache:\n return self.cache['last_payment']\n\n if 'desc' not in self.payment_config:\n self.payment_config['desc'] = type(self).__name__\n\n where=[{'clause': self.transaction_type + ' > %s', 'params': [0]}]\n\n if 'start_date' in self.payment_config:\n where.append({\n 'clause': '`date` >= %s',\n 'params': [self.payment_config['start_date']]\n })\n\n if self.always_fixed or 'fixed' in self.payment_config and self.payment_config['fixed']:\n method_name = f\"find_all_by_declined_and_{self.transaction_type}_and_description\"\n\n transactions = getattr(Transaction(), method_name)(\n 0,\n self.payment_config['amount'],\n self.payment_config['desc'],\n orderby='created_at',\n orderdir='desc',\n search=['description'],\n where=where\n )\n else:\n transactions = Transaction().find_all_by_declined_and_description(\n 0,\n self.payment_config['desc'],\n orderby='created_at',\n orderdir='desc',\n search=['description'],\n where=where\n )\n\n for transaction in transactions:\n if 'start_date' in self.payment_config and transaction.date < self.payment_config['start_date']:\n continue\n\n if transaction.id not in Transactions().seen:\n Transactions().seen[transaction.id] = 1\n\n self.cache['last_payment'] = transaction\n\n return self.cache['last_payment']\n\n self.cache['last_payment'] = None\n\n return self.cache['last_payment']\n\n\n # older last payment, may be before start_date\n @property\n def older_last_payment(self):\n if 'older_last_payment' in self.cache:\n return self.cache['older_last_payment']\n\n if 'desc' not in self.payment_config:\n self.payment_config['desc'] = type(self).__name__\n\n where=[{'clause': self.transaction_type + ' > %s', 'params': [0]}]\n\n if 'start_date' in self.payment_config:\n where.append({'clause': 'created_at >= %s', 'params': [self.payment_config['start_date']]})\n\n if 
self.always_fixed or 'fixed' in self.payment_config and self.payment_config['fixed']:\n method_name = f\"find_all_by_declined_and_{self.transaction_type}_and_description\"\n\n transactions = getattr(Transaction(), method_name)(\n 0,\n self.payment_config['amount'],\n self.payment_config['desc'],\n orderby='created_at',\n orderdir='desc',\n search=['description'],\n where=where\n )\n else:\n transactions = Transaction().find_all_by_declined_and_description(\n 0,\n self.payment_config['desc'],\n orderby='created_at',\n orderdir='desc',\n search=['description'],\n where=where\n )\n\n for transaction in transactions:\n if transaction.id not in Transactions().seen:\n Transactions().seen[transaction.id] = 1\n\n self.cache['older_last_payment'] = transaction\n\n return self.cache['older_last_payment']\n\n self.cache['older_last_payment'] = None\n\n return self.cache['older_last_payment']\n\n\n @property\n def due_date(self):\n if 'yearly_month' in self.payment_config:\n day = self.today + datetime.timedelta(days=1)\n\n while day.month != self.payment_config['yearly_month'] or day.day != self.payment_config['yearly_day']:\n day += datetime.timedelta(days=1)\n\n return datetime.date(day.year, day.month, day.day)\n\n if 'renew_date' in self.payment_config:\n return self.payment_config['renew_date']\n\n if not self.last_date:\n if 'start_date' in self.payment_config:\n return self.payment_config['start_date']\n\n return None\n\n if self.last_date.month == 12:\n due_date = datetime.date(self.last_date.year+1, 1, self.last_date.day)\n else:\n due_date = datetime.date(self.last_date.year, self.last_date.month+1, self.last_date.day)\n\n if 'start_date' in self.payment_config and due_date < self.payment_config['start_date']:\n return self.payment_config['start_date']\n\n if 'yearly_month' not in self.payment_config:\n if 'exclude_months' in self.payment_config:\n while due_date.month in self.payment_config['exclude_months']:\n if self.last_date.month == 12:\n due_date = datetime.date(due_date.year+1, 1, due_date.day)\n else:\n due_date = datetime.date(due_date.year, due_date.month+1, due_date.day)\n\n return due_date\n\n\n @property\n def due_next_month(self):\n if 'renew_date' in self.payment_config:\n return self.payment_config['renew_date'] < self.following_salary_date\n\n if 'start_date' in self.payment_config and self.payment_config['start_date'] >= self.following_salary_date:\n return False\n\n if self.due_date is None:\n return True\n\n return self.due_date < self.following_salary_date\n\n\n def yearly_payment_due_this_month(self, payment, last_salary_date):\n date_from = last_salary_date.strftime('%Y-%m-%d')\n date = last_salary_date\n\n while date.day <= 15:\n date += datetime.timedelta(days=1)\n\n while date.day != 15:\n date += datetime.timedelta(days=1)\n\n date_to = date.strftime('%Y-%m-%d')\n\n due_date = str(last_salary_date.year) + '-' + (str(payment['yearly_month']).rjust(2,'0')) + '-' + (str(payment['yearly_day']).rjust(2,'0'))\n\n return due_date >= date_from and due_date <= date_to"
},
{
"identifier": "Account",
"path": "monzo_utils/model/account.py",
"snippet": "class Account(BaseModel):\n\n DISPLAY_KEYS = ['name','sortcode','account_no','balance','available']\n\n\n def __init__(self, attrs={}):\n super().__init__(attrs)\n\n\n def transactions(self, orderby='created_at', orderdir='asc', limit=None):\n return super().related('Transaction', 'account_id', self.id, orderby, orderdir, limit)\n\n\n def pots(self, orderby='name', orderdir='asc', limit=None):\n return super().related('Pot', 'account_id', self.id, orderby, orderdir, limit, deleted=0)\n\n\n @property\n def __dict__(self):\n attrs = {'attrs': self.attrs}\n\n for pot in self.pots(orderby='name'):\n attrs['attrs'][pot.name] = pot.balance\n\n return attrs\n\n\n @property\n def keys(self):\n keys = []\n\n for key in self.DISPLAY_KEYS.copy():\n if '-t' in sys.argv and ((key == 'sortcode' and self.sortcode is None) or \\\n (key == 'account_no' and self.account_no is None)):\n continue\n\n keys.append(key)\n\n for pot in self.pots(orderby='name'):\n if pot.name not in keys:\n keys.append(pot.name)\n\n return keys\n\n\n def last_salary_transaction(self, description, payment_day, salary_minimum):\n return DB().find_transaction_by_account_id_and_declined_and_description(\n self.id,\n 0,\n description,\n orderby='created_at',\n orderdir='desc',\n limit=1,\n search=['description'],\n where=[{\n 'clause': 'money_in >= %s',\n 'params': [salary_minimum]\n }]\n )"
},
{
"identifier": "Transaction",
"path": "monzo_utils/model/transaction.py",
"snippet": "class Transaction(BaseModel):\n\n DISPLAY_KEYS = ['date','type','money_in','money_out','pending','description']\n RELATIONSHIPS = {\n 'account': ['`transaction`.account_id', 'account.id'],\n 'transaction_metadata': ['`transaction`.id', 'transaction_metadata.transaction_id'],\n 'pot': ['`transaction`.pot_id', 'pot.id']\n }"
}
] | import datetime
import math
from monzo_utils.model.payment import Payment
from monzo_utils.model.account import Account
from monzo_utils.model.transaction import Transaction | 4,266 |
class FlexSummary(Payment):
def __init__(self, config, total, total_next_month, remaining, last_salary_date):
self.config = config
self.flex_total = total
self.flex_total_next_month = total_next_month
self.flex_remaining = remaining
self.last_salary_date = last_salary_date
self.cache = {}
@property
def status(self):
if self.last_payment and self.last_payment.date >= self.last_salary_date:
return 'PAID'
return 'DUE'
@property
def name(self):
return 'Flex Payment'
@property
def display_amount(self):
return self.flex_total
@property
def last_date(self):
if self.last_payment:
return self.last_payment.date
last_date = datetime.datetime.now()
while last_date.day != self.config['flex_payment_date']:
last_date -= datetime.timedelta(days=1)
return last_date
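    # Due date: snap forward from the last payment to its flex_payment_date, then to the following one; with no prior payment, use the first flex_payment_date after today.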
@property
def due_date(self):
if 'due_date' in self.cache:
return self.cache['due_date']
if self.last_payment:
due_date = self.last_payment.date
while due_date.day != self.config['flex_payment_date']:
due_date += datetime.timedelta(days=1)
due_date += datetime.timedelta(days=1)
while due_date.day != self.config['flex_payment_date']:
due_date += datetime.timedelta(days=1)
return due_date
date = datetime.datetime.now() + datetime.timedelta(days=1)
while date.day != self.config['flex_payment_date']:
date += datetime.timedelta(days=1)
return datetime.date(date.year, date.month, date.day)
@property
def remaining(self):
return self.flex_remaining
def display(self):
super().display()
data = self.data()
print("%s: %s %s %s %s %s %s %s" % (
'SKIPPED'.rjust(7),
data['payment_type'].ljust(15),
'Flex Payment next month'.ljust(25),
data['suffix'].ljust(5),
('£%.2f' % (self.flex_total_next_month)).ljust(8),
('£%.2f' % (data['remaining'] - self.flex_total_next_month)).ljust(8) if data['remaining'] else ''.ljust(8),
data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),
data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''
))
@property
def last_payment(self):
if 'last_payment' in self.cache:
return self.cache['last_payment']
account = Account().find_by_name(self.config['flex_account'])
where = [{'clause': 'date > %s', 'params': [self.last_salary_date]}]
|
class FlexSummary(Payment):
def __init__(self, config, total, total_next_month, remaining, last_salary_date):
self.config = config
self.flex_total = total
self.flex_total_next_month = total_next_month
self.flex_remaining = remaining
self.last_salary_date = last_salary_date
self.cache = {}
@property
def status(self):
if self.last_payment and self.last_payment.date >= self.last_salary_date:
return 'PAID'
return 'DUE'
@property
def name(self):
return 'Flex Payment'
@property
def display_amount(self):
return self.flex_total
@property
def last_date(self):
if self.last_payment:
return self.last_payment.date
last_date = datetime.datetime.now()
while last_date.day != self.config['flex_payment_date']:
last_date -= datetime.timedelta(days=1)
return last_date
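    # Due date: snap forward from the last payment to its flex_payment_date, then to the following one; with no prior payment, use the first flex_payment_date after today.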
@property
def due_date(self):
if 'due_date' in self.cache:
return self.cache['due_date']
if self.last_payment:
due_date = self.last_payment.date
while due_date.day != self.config['flex_payment_date']:
due_date += datetime.timedelta(days=1)
due_date += datetime.timedelta(days=1)
while due_date.day != self.config['flex_payment_date']:
due_date += datetime.timedelta(days=1)
return due_date
date = datetime.datetime.now() + datetime.timedelta(days=1)
while date.day != self.config['flex_payment_date']:
date += datetime.timedelta(days=1)
return datetime.date(date.year, date.month, date.day)
@property
def remaining(self):
return self.flex_remaining
def display(self):
super().display()
data = self.data()
print("%s: %s %s %s %s %s %s %s" % (
'SKIPPED'.rjust(7),
data['payment_type'].ljust(15),
'Flex Payment next month'.ljust(25),
data['suffix'].ljust(5),
('£%.2f' % (self.flex_total_next_month)).ljust(8),
('£%.2f' % (data['remaining'] - self.flex_total_next_month)).ljust(8) if data['remaining'] else ''.ljust(8),
data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),
data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''
))
@property
def last_payment(self):
if 'last_payment' in self.cache:
return self.cache['last_payment']
account = Account().find_by_name(self.config['flex_account'])
where = [{'clause': 'date > %s', 'params': [self.last_salary_date]}]
| transaction = Transaction().find_by_account_id_and_declined_and_money_in_and_description( | 2 | 2023-11-05 12:48:18+00:00 | 8k |
rossiyareich/inknhue | train.py | [
{
"identifier": "ConditionalDataset",
"path": "src/conditional/conditional_dataset.py",
"snippet": "class ConditionalDataset(Dataset):\n def __init__(self, dataset_path, transform=None):\n self.dataset_path = dataset_path\n self.transform = transform\n self.cond_dataset = []\n\n style2paints = get_entries(f\"{dataset_path}/style2paints/*.png\")\n colored = get_entries(f\"{dataset_path}/colored/*.png\")\n\n assert len(style2paints) == len(colored)\n\n for s, c in zip(style2paints, colored):\n self.cond_dataset.append({\"style2paints\": s, \"colored\": c})\n\n def __len__(self):\n return len(self.cond_dataset)\n\n def __getitem__(self, idx):\n s = Image.open(self.cond_dataset[idx][\"style2paints\"]).convert(\"RGB\")\n c = Image.open(self.cond_dataset[idx][\"colored\"]).convert(\"RGB\")\n g = c.convert(\"L\").convert(\"RGB\")\n\n if self.transform is not None:\n return self.transform(g, s, c)\n\n return g, s, c"
},
{
"identifier": "ConditionalDecoder",
"path": "src/conditional/conditional_decoder.py",
"snippet": "class ConditionalDecoder(nn.Module):\n def __init__(\n self,\n *,\n channels: int,\n channel_multipliers: List[int],\n n_resnet_blocks: int,\n out_channels: int,\n z_channels: int\n ) -> None:\n super().__init__()\n\n # Number of blocks of different resolutions.\n # The resolution is halved at the end each top level block\n num_resolutions = len(channel_multipliers)\n\n # Number of channels in each top level block, in the reverse order\n channels_list = [m * channels for m in channel_multipliers]\n\n # Number of channels in the top-level block\n channels = channels_list[-1]\n\n # Initial $3 \\times 3$ convolution layer that maps the embedding space to `channels`\n self.conv_in = nn.Conv2d(z_channels, channels, 3, stride=1, padding=1)\n\n # ResNet blocks with attention\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(channels, channels)\n self.mid.attn_1 = AttnBlock(channels)\n self.mid.block_2 = ResnetBlock(channels, channels)\n\n # List of top-level blocks\n self.up = nn.ModuleList()\n\n # Create top-level blocks\n for i in reversed(range(num_resolutions)):\n # Each top level block consists of multiple ResNet Blocks and up-sampling\n resnet_blocks = nn.ModuleList()\n\n # Add ResNet Blocks\n for _ in range(n_resnet_blocks + 1):\n resnet_blocks.append(ResnetBlock(channels, channels_list[i]))\n channels = channels_list[i]\n\n # Top-level block\n up = nn.Module()\n up.block = resnet_blocks\n\n # Up-sampling at the end of each top level block except the first\n if i != 0:\n up.upsample = UpSample(channels)\n else:\n up.upsample = nn.Identity()\n\n # Prepend to be consistent with the checkpoint\n self.up.insert(0, up)\n\n # Map to image space with a $3 \\times 3$ convolution\n self.norm_out = normalization(channels)\n self.conv_out = nn.Conv2d(channels, out_channels, 3, stride=1, padding=1)\n\n def forward(self, z: torch.Tensor, conds_z: List[torch.Tensor]) -> torch.Tensor:\n # Map to `channels` with the initial convolution\n h = self.conv_in(z)\n\n # ResNet blocks with attention\n h = self.mid.block_1(h)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h)\n\n # Top-level blocks\n for up, cond_z in reversed(list(zip(self.up, conds_z))):\n # ResNet Blocks\n for block in up.block:\n h = block(h)\n\n h += cond_z\n\n # Up-sampling\n h = up.upsample(h)\n\n # Normalize and map to image space\n h = self.norm_out(h)\n h = swish(h)\n img = self.conv_out(h)\n\n return img"
},
{
"identifier": "ConditionalEncoder",
"path": "src/conditional/conditional_encoder.py",
"snippet": "class ConditionalEncoder(nn.Module):\n def __init__(\n self,\n *,\n channels: int,\n channel_multipliers: List[int],\n n_resnet_blocks: int,\n in_channels: int,\n ) -> None:\n super().__init__()\n\n # Number of blocks of different resolutions.\n # The resolution is halved at the end each top level block\n n_resolutions = len(channel_multipliers)\n\n # Initial $3 \\times 3$ convolution layer that maps the image to `channels`\n self.conv_in = nn.Conv2d(in_channels, channels, 3, stride=1, padding=1)\n\n # Number of channels in each top level block\n channels_list = [m * channels for m in [1] + channel_multipliers]\n\n # List of top-level blocks\n self.down = nn.ModuleList()\n self.proj = nn.ModuleList()\n\n # Create top-level blocks\n for i in range(n_resolutions):\n # Each top level block consists of multiple ResNet Blocks and down-sampling\n resnet_blocks = nn.ModuleList()\n\n # Add ResNet Blocks\n for _ in range(n_resnet_blocks):\n resnet_blocks.append(ResnetBlock(channels, channels_list[i + 1]))\n channels = channels_list[i + 1]\n\n # Top-level block\n down = nn.Module()\n down.block = resnet_blocks\n\n # Down-sampling at the end of each top level block except the last\n if i != n_resolutions - 1:\n down.downsample = DownSample(channels)\n else:\n down.downsample = nn.Identity()\n\n self.down.append(down)\n\n # Projection\n proj = nn.Conv2d(channels, channels, 1, 1, 0)\n proj = zero_module(proj)\n self.proj.append(proj)\n\n def forward(self, cond: torch.Tensor) -> List[torch.Tensor]:\n # Map to `channels` with the initial convolution\n x = self.conv_in(cond)\n\n conds_z = []\n\n # Top-level blocks\n for down, proj in zip(self.down, self.proj):\n # ResNet Blocks\n for block in down.block:\n x = block(x)\n\n conds_z.append(proj(x))\n\n # Down-sampling\n x = down.downsample(x)\n\n return conds_z"
},
{
"identifier": "Encoder",
"path": "src/encoder.py",
"snippet": "class Encoder(nn.Module):\n \"\"\"\n ## Encoder module\n \"\"\"\n\n def __init__(\n self,\n *,\n channels: int,\n channel_multipliers: List[int],\n n_resnet_blocks: int,\n in_channels: int,\n z_channels: int\n ):\n \"\"\"\n :param channels: is the number of channels in the first convolution layer\n :param channel_multipliers: are the multiplicative factors for the number of channels in the\n subsequent blocks\n :param n_resnet_blocks: is the number of resnet layers at each resolution\n :param in_channels: is the number of channels in the image\n :param z_channels: is the number of channels in the embedding space\n \"\"\"\n super().__init__()\n\n # Number of blocks of different resolutions.\n # The resolution is halved at the end each top level block\n n_resolutions = len(channel_multipliers)\n\n # Initial $3 \\times 3$ convolution layer that maps the image to `channels`\n self.conv_in = nn.Conv2d(in_channels, channels, 3, stride=1, padding=1)\n\n # Number of channels in each top level block\n channels_list = [m * channels for m in [1] + channel_multipliers]\n\n # List of top-level blocks\n self.down = nn.ModuleList()\n # Create top-level blocks\n for i in range(n_resolutions):\n # Each top level block consists of multiple ResNet Blocks and down-sampling\n resnet_blocks = nn.ModuleList()\n # Add ResNet Blocks\n for _ in range(n_resnet_blocks):\n resnet_blocks.append(ResnetBlock(channels, channels_list[i + 1]))\n channels = channels_list[i + 1]\n # Top-level block\n down = nn.Module()\n down.block = resnet_blocks\n # Down-sampling at the end of each top level block except the last\n if i != n_resolutions - 1:\n down.downsample = DownSample(channels)\n else:\n down.downsample = nn.Identity()\n\n self.down.append(down)\n\n # Final ResNet blocks with attention\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(channels, channels)\n self.mid.attn_1 = AttnBlock(channels)\n self.mid.block_2 = ResnetBlock(channels, channels)\n\n # Map to embedding space with a $3 \\times 3$ convolution\n self.norm_out = normalization(channels)\n self.conv_out = nn.Conv2d(channels, 2 * z_channels, 3, stride=1, padding=1)\n\n def forward(self, img: torch.Tensor):\n \"\"\"\n :param img: is the image tensor with shape `[batch_size, img_channels, img_height, img_width]`\n \"\"\"\n\n # Map to `channels` with the initial convolution\n x = self.conv_in(img)\n\n # Top-level blocks\n for down in self.down:\n # ResNet Blocks\n for block in down.block:\n x = block(x)\n # Down-sampling\n x = down.downsample(x)\n\n # Final ResNet blocks with attention\n x = self.mid.block_1(x)\n x = self.mid.attn_1(x)\n x = self.mid.block_2(x)\n\n # Normalize and map to embedding space\n x = self.norm_out(x)\n x = swish(x)\n x = self.conv_out(x)\n\n return x"
},
{
"identifier": "GaussianDistribution",
"path": "src/gaussian_distribution.py",
"snippet": "class GaussianDistribution:\n \"\"\"\n ## Gaussian Distribution\n \"\"\"\n\n def __init__(self, parameters):\n \"\"\"\n :param parameters: are the means and log of variances of the embedding of shape\n `[batch_size, z_channels * 2, z_height, z_height]`\n \"\"\"\n self.parameters = parameters\n # Split mean and log of variance\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n # Clamp the log of variances\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n # Calculate standard deviation\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n\n def sample(self):\n # Sample from the distribution\n x = self.mean + self.std * torch.randn_like(\n self.std, dtype=self.std.dtype, device=self.std.device\n )\n return x\n\n def kl(self, other=None):\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean"
},
{
"identifier": "LPIPSWithDiscriminator",
"path": "src/perceptual_loss.py",
"snippet": "class LPIPSWithDiscriminator(nn.Module):\n def __init__(\n self,\n disc_start,\n disc_num_layers=3,\n disc_in_channels=3,\n disc_factor=1.0,\n disc_weight=1.0,\n disc_loss=\"hinge\",\n kl_weight=1.0,\n perceptual_weight=1.0,\n use_actnorm=False,\n ):\n super().__init__()\n\n assert disc_loss in [\"hinge\", \"vanilla\"]\n self.discriminator = NLayerDiscriminator(\n input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm\n ).apply(weights_init)\n self.discriminator_iter_start = disc_start\n self.disc_loss = hinge_d_loss if disc_loss == \"hinge\" else vanilla_d_loss\n self.disc_factor = disc_factor\n self.discriminator_weight = disc_weight\n\n # self.kl_weight = kl_weight\n self.perceptual_loss = LPIPS().eval()\n self.perceptual_weight = perceptual_weight\n self.logvar = nn.Parameter(torch.ones(size=()) * 0)\n\n def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer):\n nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]\n g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]\n d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)\n d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()\n d_weight *= self.discriminator_weight\n return d_weight\n\n def forward(\n self,\n inputs,\n reconstructions,\n posteriors,\n optimizer_idx,\n global_step,\n last_layer,\n cond,\n ):\n rec_loss = torch.abs(cond.contiguous() - reconstructions.contiguous())\n if self.perceptual_weight > 0:\n p_loss = self.perceptual_loss(\n cond.contiguous(), reconstructions.contiguous()\n )\n rec_loss += self.perceptual_weight * p_loss\n nll_loss = torch.mean(rec_loss)\n\n if optimizer_idx == 0:\n logits_fake = self.discriminator(reconstructions.contiguous())\n g_loss = -torch.mean(logits_fake)\n\n if self.disc_factor > 0.0:\n try:\n d_weight = self.calculate_adaptive_weight(\n nll_loss, g_loss, last_layer=last_layer\n )\n except RuntimeError:\n assert not self.training\n d_weight = torch.tensor(0.0)\n else:\n d_weight = torch.tensor(0.0)\n\n disc_factor = adopt_weight(\n self.disc_factor, global_step, threshold=self.discriminator_iter_start\n )\n loss = nll_loss + d_weight * disc_factor * g_loss\n\n log = {\n \"total_loss\": loss.clone().detach().mean(),\n \"rec_loss\": rec_loss.detach().mean(),\n \"d_weight\": d_weight.detach(),\n \"disc_factor\": torch.tensor(disc_factor),\n \"g_loss\": g_loss.detach().mean(),\n }\n\n return loss, log\n\n if optimizer_idx == 1:\n logits_real = self.discriminator(cond.contiguous().detach())\n logits_fake = self.discriminator(reconstructions.contiguous().detach())\n\n disc_factor = adopt_weight(\n self.disc_factor, global_step, threshold=self.discriminator_iter_start\n )\n d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)\n\n log = {\n \"disc_loss\": d_loss.clone().detach().mean(),\n \"logits_real\": logits_real.detach().mean(),\n \"logits_fake\": logits_fake.detach().mean(),\n }\n\n return d_loss, log"
},
{
"identifier": "resize",
"path": "src/utils.py",
"snippet": "def resize(img, size):\n w, h = img.size\n if w != size or h != size:\n if w <= h:\n h = int(float(h) * float(size) / float(w))\n w = size\n else:\n w = int(float(w) * float(size) / float(h))\n h = size\n img = img.resize((w, h), Image.Resampling.LANCZOS)\n\n return img"
}
] | import argparse
import copy
import gc
import logging
import os
import numpy as np
import torch
import torch.backends.cuda
import torch.backends.cudnn
import torch.optim as optim
import torchvision.transforms.functional as VF
import wandb
from accelerate import Accelerator
from omegaconf import OmegaConf
from rich.traceback import install
from torch import nn
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from tqdm.auto import tqdm
from src.conditional.conditional_dataset import ConditionalDataset
from src.conditional.conditional_decoder import ConditionalDecoder
from src.conditional.conditional_encoder import ConditionalEncoder
from src.encoder import Encoder
from src.gaussian_distribution import GaussianDistribution
from src.perceptual_loss import LPIPSWithDiscriminator
from src.utils import resize | 5,534 | "architecture": conf.wandb.config.architecture,
"base_lr": conf.params.base_lr,
"epoch": conf.params.epoch,
},
)
# Load models
logging.info("Setting up models")
# Convolution to map from embedding space to quantized embedding space moments
quant_conv = nn.Conv2d(
2 * pretrained_yaml.params.ddconfig.z_channels,
2 * pretrained_yaml.params.embed_dim,
1,
)
# Convolution to map from quantized embedding space back to embedding space
post_quant_conv = nn.Conv2d(
pretrained_yaml.params.embed_dim,
pretrained_yaml.params.ddconfig.z_channels,
1,
)
encoder = Encoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
in_channels=pretrained_yaml.params.ddconfig.in_channels,
z_channels=pretrained_yaml.params.ddconfig.z_channels,
)
cond_encoder = ConditionalEncoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
in_channels=pretrained_yaml.params.ddconfig.in_channels,
)
cond_decoder = ConditionalDecoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
out_channels=pretrained_yaml.params.ddconfig.out_ch,
z_channels=pretrained_yaml.params.ddconfig.z_channels,
)
discriminator = LPIPSWithDiscriminator(
disc_start=pretrained_yaml.params.lossconfig.disc_start,
disc_weight=pretrained_yaml.params.lossconfig.disc_weight,
kl_weight=pretrained_yaml.params.lossconfig.kl_weight,
)
# Setup flags
logging.info("Setting up flags")
quant_conv.requires_grad_(False)
post_quant_conv.requires_grad_(False)
encoder.requires_grad_(False)
cond_encoder.train()
cond_decoder.train()
discriminator.train()
# Load state_dicts
logging.info("Loading state_dicts")
quant_conv_state_dict = {}
post_quant_conv_state_dict = {}
encoder_state_dict = {}
cond_encoder_state_dict = {}
cond_decoder_state_dict = {}
loss_state_dict = {}
for k, v in pretrained_ckpt["state_dict"].items():
if k.startswith("quant_conv"):
quant_conv_state_dict[k.replace("quant_conv.", "", 1)] = v
elif k.startswith("post_quant_conv"):
post_quant_conv_state_dict[k.replace("post_quant_conv.", "", 1)] = v
elif k.startswith("encoder"):
encoder_state_dict[k.replace("encoder.", "", 1)] = v
if not (
k.startswith("encoder.mid")
or k.startswith("encoder.norm_out")
or k.startswith("encoder.conv_out")
):
cond_encoder_state_dict[k.replace("encoder.", "", 1)] = copy.deepcopy(v)
elif k.startswith("decoder"):
cond_decoder_state_dict[k.replace("decoder.", "", 1)] = v
elif k.startswith("loss"):
loss_state_dict[k.replace("loss.", "", 1)] = v
else:
raise KeyError(f"Unexpected state_dict key: {k}")
quant_conv.load_state_dict(quant_conv_state_dict, strict=True)
post_quant_conv.load_state_dict(post_quant_conv_state_dict, strict=True)
encoder.load_state_dict(encoder_state_dict, strict=True)
cond_encoder.load_state_dict(cond_encoder_state_dict, strict=False)
cond_decoder.load_state_dict(cond_decoder_state_dict, strict=True)
discriminator.load_state_dict(loss_state_dict, strict=True)
# Load dataset & dataloader
logging.info("Setting up Dataset and DataLoader")
def transform(g, s, c):
g, s, c = (
resize(g, conf.params.size),
resize(s, conf.params.size),
resize(c, conf.params.size),
)
i, j, h, w = transforms.RandomCrop.get_params(
img=g,
output_size=(
conf.params.crop_size,
conf.params.crop_size,
),
)
g, s, c = VF.crop(g, i, j, h, w), VF.crop(s, i, j, h, w), VF.crop(c, i, j, h, w)
pil_to_tensor = transforms.PILToTensor()
g, s, c = pil_to_tensor(g), pil_to_tensor(s), pil_to_tensor(c)
g, s, c = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((c / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
return g, s, c
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--conf_path",
type=str,
required=True,
help="Path to the configuration file",
)
args = parser.parse_args()
return args
def main(args):
# Load configuration
logging.info("Loading configuration")
conf = OmegaConf.load(args.conf_path)
# Create checkpoint directory
logging.info("Creating checkpoint directory")
os.makedirs(conf.paths.checkpoint_path, exist_ok=True)
# Allow TF32
logging.info("Allowing TF32")
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
# Load Accelerate
logging.info("Setting up Accelerate")
accelerator = Accelerator()
# Load pretrained parameters
logging.info("Loading pretrained checkpoints")
pretrained_ckpt = torch.load(conf.paths.pretrained_ckpt)
pretrained_yaml = OmegaConf.load(conf.paths.pretrained_yaml)
# Load wandb
logging.info("Setting up wandb")
wandb.init(
project=conf.wandb.project,
config={
"optimizer": conf.wandb.config.optimizer,
"architecture": conf.wandb.config.architecture,
"base_lr": conf.params.base_lr,
"epoch": conf.params.epoch,
},
)
# Load models
logging.info("Setting up models")
# Convolution to map from embedding space to quantized embedding space moments
quant_conv = nn.Conv2d(
2 * pretrained_yaml.params.ddconfig.z_channels,
2 * pretrained_yaml.params.embed_dim,
1,
)
# Convolution to map from quantized embedding space back to embedding space
post_quant_conv = nn.Conv2d(
pretrained_yaml.params.embed_dim,
pretrained_yaml.params.ddconfig.z_channels,
1,
)
encoder = Encoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
in_channels=pretrained_yaml.params.ddconfig.in_channels,
z_channels=pretrained_yaml.params.ddconfig.z_channels,
)
cond_encoder = ConditionalEncoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
in_channels=pretrained_yaml.params.ddconfig.in_channels,
)
cond_decoder = ConditionalDecoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
out_channels=pretrained_yaml.params.ddconfig.out_ch,
z_channels=pretrained_yaml.params.ddconfig.z_channels,
)
discriminator = LPIPSWithDiscriminator(
disc_start=pretrained_yaml.params.lossconfig.disc_start,
disc_weight=pretrained_yaml.params.lossconfig.disc_weight,
kl_weight=pretrained_yaml.params.lossconfig.kl_weight,
)
# Setup flags
logging.info("Setting up flags")
quant_conv.requires_grad_(False)
post_quant_conv.requires_grad_(False)
encoder.requires_grad_(False)
cond_encoder.train()
cond_decoder.train()
discriminator.train()
# Load state_dicts
logging.info("Loading state_dicts")
quant_conv_state_dict = {}
post_quant_conv_state_dict = {}
encoder_state_dict = {}
cond_encoder_state_dict = {}
cond_decoder_state_dict = {}
loss_state_dict = {}
for k, v in pretrained_ckpt["state_dict"].items():
if k.startswith("quant_conv"):
quant_conv_state_dict[k.replace("quant_conv.", "", 1)] = v
elif k.startswith("post_quant_conv"):
post_quant_conv_state_dict[k.replace("post_quant_conv.", "", 1)] = v
elif k.startswith("encoder"):
encoder_state_dict[k.replace("encoder.", "", 1)] = v
if not (
k.startswith("encoder.mid")
or k.startswith("encoder.norm_out")
or k.startswith("encoder.conv_out")
):
cond_encoder_state_dict[k.replace("encoder.", "", 1)] = copy.deepcopy(v)
elif k.startswith("decoder"):
cond_decoder_state_dict[k.replace("decoder.", "", 1)] = v
elif k.startswith("loss"):
loss_state_dict[k.replace("loss.", "", 1)] = v
else:
raise KeyError(f"Unexpected state_dict key: {k}")
quant_conv.load_state_dict(quant_conv_state_dict, strict=True)
post_quant_conv.load_state_dict(post_quant_conv_state_dict, strict=True)
encoder.load_state_dict(encoder_state_dict, strict=True)
cond_encoder.load_state_dict(cond_encoder_state_dict, strict=False)
cond_decoder.load_state_dict(cond_decoder_state_dict, strict=True)
discriminator.load_state_dict(loss_state_dict, strict=True)
# Load dataset & dataloader
logging.info("Setting up Dataset and DataLoader")
def transform(g, s, c):
g, s, c = (
resize(g, conf.params.size),
resize(s, conf.params.size),
resize(c, conf.params.size),
)
i, j, h, w = transforms.RandomCrop.get_params(
img=g,
output_size=(
conf.params.crop_size,
conf.params.crop_size,
),
)
g, s, c = VF.crop(g, i, j, h, w), VF.crop(s, i, j, h, w), VF.crop(c, i, j, h, w)
pil_to_tensor = transforms.PILToTensor()
g, s, c = pil_to_tensor(g), pil_to_tensor(s), pil_to_tensor(c)
g, s, c = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((c / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
return g, s, c
| cond_dataset = cond_dataset_full = ConditionalDataset( | 0 | 2023-11-03 09:35:30+00:00 | 8k |
TencentBlueKing/bkflow-feel | bkflow_feel/transformer.py | [
{
"identifier": "RangeGroupOperator",
"path": "bkflow_feel/data_models.py",
"snippet": "class RangeGroupOperator(enum.Enum):\n GT = \"greater than\"\n GTE = \"greater than or equal\"\n LT = \"less than\"\n LTE = \"less than or equal\""
},
{
"identifier": "AfterFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class AfterFunc(BinaryOperator):\n def evaluate(self, context):\n left_val = self.left.evaluate(context)\n right_val = self.right.evaluate(context)\n operator = None\n if isinstance(self.left, RangeGroup):\n if left_val.left_operator == RangeGroupOperator.GT:\n operator = RangeGroupOperator.GTE\n left_val = left_val.left_val\n if isinstance(self.right, RangeGroup):\n if right_val.right_operator == RangeGroupOperator.LT:\n operator = RangeGroupOperator.GTE\n right_val = right_val.right_val\n if operator == RangeGroupOperator.GTE:\n return left_val >= right_val\n return left_val > right_val"
},
{
"identifier": "And",
"path": "bkflow_feel/parsers.py",
"snippet": "class And(BinaryOperator):\n def evaluate(self, context):\n return self.left.evaluate(context) and self.right.evaluate(context)"
},
{
"identifier": "BeforeFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class BeforeFunc(BinaryOperator):\n def evaluate(self, context):\n left_val = self.left.evaluate(context)\n right_val = self.right.evaluate(context)\n operator = None\n if isinstance(self.left, RangeGroup):\n if left_val.right_operator == RangeGroupOperator.LT:\n operator = RangeGroupOperator.GTE\n left_val = left_val.right_val\n if isinstance(self.right, RangeGroup):\n if right_val.left_operator == RangeGroupOperator.GT:\n operator = RangeGroupOperator.GTE\n right_val = right_val.left_val\n\n if operator == RangeGroupOperator.GTE:\n return left_val <= right_val\n\n return left_val < right_val"
},
{
"identifier": "Between",
"path": "bkflow_feel/parsers.py",
"snippet": "class Between(Expression):\n def __init__(self, value, left, right):\n self.value = value\n self.min = left\n self.max = right\n\n def evaluate(self, context):\n value = self.value.evaluate(context)\n return self.min.evaluate(context) <= value <= self.max.evaluate(context)"
},
{
"identifier": "Boolean",
"path": "bkflow_feel/parsers.py",
"snippet": "class Boolean(CommonExpression):\n pass"
},
{
"identifier": "Context",
"path": "bkflow_feel/parsers.py",
"snippet": "class Context(Expression):\n def __init__(self, pairs):\n self.pairs = pairs\n\n def evaluate(self, context):\n return dict(pair.evaluate(context) for pair in self.pairs)"
},
{
"identifier": "ContextItem",
"path": "bkflow_feel/parsers.py",
"snippet": "class ContextItem(Expression):\n def __init__(self, expr, keys):\n self.expr = expr\n self.keys = keys\n\n def evaluate(self, context):\n result = self.expr.evaluate(context)\n for key in self.keys:\n if not isinstance(result, dict):\n return None\n result = result.get(key)\n return result"
},
{
"identifier": "Date",
"path": "bkflow_feel/parsers.py",
"snippet": "class Date(CommonExpression):\n def evaluate(self, context):\n year, month, day = self.value.split(\"-\")\n return datetime.date(int(year), int(month), int(day))"
},
{
"identifier": "DateAndTime",
"path": "bkflow_feel/parsers.py",
"snippet": "class DateAndTime(Expression):\n def __init__(self, date: Date, time: Time):\n self.date = date\n self.time = time\n\n def evaluate(self, context):\n date = self.date.evaluate(context)\n time = self.time.evaluate(context)\n return datetime.datetime.combine(date, time, tzinfo=time.tzinfo)"
},
{
"identifier": "DayOfWeekFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class DayOfWeekFunc(CommonExpression):\n WEEKDAYS = [\n \"Monday\",\n \"Tuesday\",\n \"Wednesday\",\n \"Thursday\",\n \"Friday\",\n \"Saturday\",\n \"Sunday\",\n ]\n\n def evaluate(self, context):\n date_or_datetime = self.value.evaluate(context)\n return self.WEEKDAYS[date_or_datetime.weekday()]"
},
{
"identifier": "Expr",
"path": "bkflow_feel/parsers.py",
"snippet": "class Expr(CommonExpression):\n def evaluate(self, context):\n return self.value.evaluate(context)"
},
{
"identifier": "FuncInvocation",
"path": "bkflow_feel/parsers.py",
"snippet": "class FuncInvocation(Expression):\n def __init__(self, func_name, args=None, named_args=None):\n self.func_name = func_name\n self.args = args or []\n self.named_args = named_args or {}\n\n def evaluate(self, context):\n try:\n func = FEELFunctionsManager.get_func(self.func_name)\n except Exception as e:\n logger.exception(e)\n func = None\n if not func:\n return None\n\n if self.args:\n params = [arg.evaluate(context) for arg in self.args]\n return func(*params)\n elif self.named_args:\n params = {key: arg.evaluate(context) for key, arg in self.named_args.items()}\n return func(**params)\n\n return func()"
},
{
"identifier": "FunctionCall",
"path": "bkflow_feel/parsers.py",
"snippet": "class FunctionCall(Expression):\n def __init__(self, name, args):\n self.name = name\n self.args = args\n\n def evaluate(self, context):\n function = context.get(self.name)\n if function is None:\n raise ValueError(f\"Unknown function: {self.name}\")\n return function(*[arg.evaluate(context) for arg in self.args])"
},
{
"identifier": "In",
"path": "bkflow_feel/parsers.py",
"snippet": "class In(BinaryOperator):\n def evaluate(self, context):\n left_val = self.left.evaluate(context)\n right_val = self.right.evaluate(context)\n if isinstance(self.right, RangeGroup):\n left_operation = (\n left_val > right_val.left_val\n if right_val.left_operator == RangeGroupOperator.GT\n else left_val >= right_val.left_val\n )\n right_operation = (\n left_val < right_val.right_val\n if right_val.right_operator == RangeGroupOperator.LT\n else left_val <= right_val.right_val\n )\n return left_operation and right_operation\n return left_val in right_val"
},
{
"identifier": "IncludesFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class IncludesFunc(BinaryOperator):\n def evaluate(self, context):\n left_val: RangeGroupData = self.left.evaluate(context)\n right_val = self.right.evaluate(context)\n if isinstance(self.right, RangeGroup):\n left_operation = left_val.left_val <= right_val.left_val\n if left_val.left_operator == RangeGroupOperator.GT and right_val.left_operator == RangeGroupOperator.GTE:\n left_operation = left_val.left_val < right_val.left_val\n right_operation = left_val.right_val >= right_val.right_val\n if left_val.right_operator == RangeGroupOperator.LT and right_val.right_operator == RangeGroupOperator.LTE:\n right_operation = left_val.right_val > right_val.right_val\n else:\n left_operation = left_val.left_val <= right_val\n if left_val.left_operator == RangeGroupOperator.GT:\n left_operation = left_val.left_val < right_val\n right_operation = left_val.right_val >= right_val\n if left_val.right_operator == RangeGroupOperator.LT:\n right_operation = left_val.right_val > right_val\n return left_operation and right_operation"
},
{
"identifier": "List",
"path": "bkflow_feel/parsers.py",
"snippet": "class List(Expression):\n def __init__(self, *items):\n self.items = items\n\n def evaluate(self, context):\n return [item.evaluate(context) for item in self.items]"
},
{
"identifier": "ListEvery",
"path": "bkflow_feel/parsers.py",
"snippet": "class ListEvery(ListMatch):\n def evaluate(self, context):\n iter_pairs = self.evaluate_and_validate_iter_pairs(context)\n for i in range(0, len(iter_pairs[0][1])):\n tmp_context = {**context, **{pair[0]: pair[1][i] for pair in iter_pairs}}\n if self.expr.evaluate(tmp_context) is False:\n return False\n return True"
},
{
"identifier": "ListFilter",
"path": "bkflow_feel/parsers.py",
"snippet": "class ListFilter(Expression):\n def __init__(self, list_expr, filter_expr):\n self.list_expr = list_expr\n self.filter_expr = filter_expr\n\n def evaluate(self, context):\n items = self.list_expr.evaluate(context)\n if not isinstance(items, list):\n return None\n result = []\n for item in items:\n try:\n # 当 item 为 dict 且 filter 中对比的 key 缺失时,可能报错\n if self.filter_expr.evaluate(item if isinstance(item, dict) else {\"item\": item}):\n result.append(item)\n except Exception as e:\n logger.exception(e)\n pass\n return result"
},
{
"identifier": "ListItem",
"path": "bkflow_feel/parsers.py",
"snippet": "class ListItem(Expression):\n def __init__(self, list_expr, index):\n self.list_expr = list_expr\n self.index = index\n\n def evaluate(self, context):\n items = self.list_expr.evaluate(context)\n if not isinstance(items, list) or self.index == 0 or len(items) < abs(self.index):\n return None\n items = items[self.index - 1] if self.index > 0 else items[self.index]\n return items"
},
{
"identifier": "ListOperator",
"path": "bkflow_feel/parsers.py",
"snippet": "class ListOperator(Expression):\n def __init__(self, operation, *expr):\n self.operation = operation\n self.expr = expr\n\n def evaluate(self, context):\n return getattr(self, self.operation)(context)\n\n def list_contains(self, context):\n list_ = self.expr[0].evaluate(context)\n item = self.expr[1].evaluate(context)\n return item in list_\n\n def list_count(self, context):\n list_ = self.expr[0].evaluate(context)\n return len(list_)\n\n def list_all(self, context):\n list_ = self.expr[0].evaluate(context)\n return all(list_)\n\n def list_any(self, context):\n list_ = self.expr[0].evaluate(context)\n return any(list_)"
},
{
"identifier": "ListSome",
"path": "bkflow_feel/parsers.py",
"snippet": "class ListSome(ListMatch):\n def evaluate(self, context):\n iter_pairs = self.evaluate_and_validate_iter_pairs(context)\n for i in range(0, len(iter_pairs[0][1])):\n tmp_context = {**context, **{pair[0]: pair[1][i] for pair in iter_pairs}}\n if self.expr.evaluate(tmp_context) is True:\n return True\n return False"
},
{
"identifier": "MonthOfYearFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class MonthOfYearFunc(CommonExpression):\n MONTH_MAPPING = {\n 1: \"January\",\n 2: \"February\",\n 3: \"March\",\n 4: \"April\",\n 5: \"May\",\n 6: \"June\",\n 7: \"July\",\n 8: \"Auguest\",\n 9: \"September\",\n 10: \"October\",\n 11: \"November\",\n 12: \"December\",\n }\n\n def evaluate(self, context):\n date_or_datetime = self.value.evaluate(context)\n return self.MONTH_MAPPING[date_or_datetime.month]"
},
{
"identifier": "Not",
"path": "bkflow_feel/parsers.py",
"snippet": "class Not(Expression):\n def __init__(self, value):\n self.value = value\n\n def evaluate(self, context):\n return not self.value.evaluate(context)"
},
{
"identifier": "NotEqual",
"path": "bkflow_feel/parsers.py",
"snippet": "class NotEqual(BinaryOperator):\n def evaluate(self, context):\n return self.left.evaluate(context) != self.right.evaluate(context)"
},
{
"identifier": "NowFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class NowFunc(Expression):\n def evaluate(self, context):\n # TODO:带时区需要配置\n return datetime.datetime.now()"
},
{
"identifier": "Null",
"path": "bkflow_feel/parsers.py",
"snippet": "class Null(Expression):\n def evaluate(self, context):\n return None"
},
{
"identifier": "Number",
"path": "bkflow_feel/parsers.py",
"snippet": "class Number(CommonExpression):\n pass"
},
{
"identifier": "Or",
"path": "bkflow_feel/parsers.py",
"snippet": "class Or(BinaryOperator):\n def evaluate(self, context):\n return self.left.evaluate(context) or self.right.evaluate(context)"
},
{
"identifier": "Pair",
"path": "bkflow_feel/parsers.py",
"snippet": "class Pair(Expression):\n def __init__(self, key, value):\n self.key = key\n self.value = value\n\n def evaluate(self, context):\n return self.key.evaluate(context), self.value.evaluate(context)"
},
{
"identifier": "RangeGroup",
"path": "bkflow_feel/parsers.py",
"snippet": "class RangeGroup(BinaryOperator):\n def __init__(self, left, right, left_operator, right_operator):\n self.left = left\n self.right = right\n self.left_operator = left_operator\n self.right_operator = right_operator\n\n def evaluate(self, context):\n left_val = self.left.evaluate(context)\n right_val = self.right.evaluate(context)\n data = {\n \"left_val\": left_val,\n \"right_val\": right_val,\n \"left_operator\": self.left_operator,\n \"right_operator\": self.right_operator,\n }\n return RangeGroupData(**data)"
},
{
"identifier": "SameTypeBinaryOperator",
"path": "bkflow_feel/parsers.py",
"snippet": "class SameTypeBinaryOperator(BinaryOperator):\n validator_cls = BinaryOperationValidator\n\n def __init__(self, operation, left, right):\n super().__init__(left, right)\n self.operation = operation\n\n def evaluate(self, context):\n left_val = self.left.evaluate(context)\n right_val = self.right.evaluate(context)\n self.validator_cls()(left_val, right_val)\n return getattr(self, self.operation)(left_val, right_val)\n\n def add(self, left_val, right_val):\n return left_val + right_val\n\n def subtract(self, left_val, right_val):\n return left_val - right_val\n\n def multiply(self, left_val, right_val):\n return left_val * right_val\n\n def divide(self, left_val, right_val):\n return left_val / right_val\n\n def power(self, left_val, right_val):\n return left_val**right_val\n\n def equal(self, left_val, right_val):\n return left_val == right_val\n\n def less_than(self, left_val, right_val):\n return left_val < right_val\n\n def greater_than(self, left_val, right_val):\n return left_val > right_val\n\n def less_than_or_equal(self, left_val, right_val):\n return left_val <= right_val\n\n def greater_than_or_equal(self, left_val, right_val):\n return left_val >= right_val"
},
{
"identifier": "String",
"path": "bkflow_feel/parsers.py",
"snippet": "class String(CommonExpression):\n pass"
},
{
"identifier": "StringOperator",
"path": "bkflow_feel/parsers.py",
"snippet": "class StringOperator(BinaryOperator):\n validator_cls = BinaryOperationValidator\n\n def __init__(self, operation, left, right):\n super().__init__(left, right)\n self.operation = operation\n\n def evaluate(self, context):\n left_val = self.left.evaluate(context)\n right_val = self.right.evaluate(context)\n self.validator_cls()(left_val, right_val, instance_type=str)\n return getattr(self, self.operation)(left_val, right_val)\n\n def contains(self, left_str, right_str):\n return right_str in left_str\n\n def starts_with(self, left_str, right_str):\n return left_str.startswith(right_str)\n\n def ends_with(self, left_str, right_str):\n return left_str.endswith(right_str)\n\n def matches(self, left_str, right_str):\n return re.match(right_str, left_str) is not None"
},
{
"identifier": "Time",
"path": "bkflow_feel/parsers.py",
"snippet": "class Time(Expression):\n def __init__(self, value, timezone: TZInfo = None):\n self.value = value\n self.timezone = timezone\n\n def evaluate(self, context):\n parsed_dt = date_parse(self.value)\n timezone = self.timezone.evaluate(context) if self.timezone is not None else None\n return datetime.time(parsed_dt.hour, parsed_dt.minute, parsed_dt.second, tzinfo=timezone)"
},
{
"identifier": "TodayFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class TodayFunc(Expression):\n def evaluate(self, context):\n return datetime.date.today()"
},
{
"identifier": "ToString",
"path": "bkflow_feel/parsers.py",
"snippet": "class ToString(CommonExpression):\n def evaluate(self, context):\n return str(self.value.evaluate(context))"
},
{
"identifier": "TZInfo",
"path": "bkflow_feel/parsers.py",
"snippet": "class TZInfo(Expression):\n def __init__(self, method, value):\n self.method = method\n self.value = value\n\n def evaluate(self, context):\n if self.method == \"name\":\n return pytz.timezone(self.value)\n elif self.method == \"offset\":\n hours, minutes = map(int, self.value.split(\":\"))\n sign = -1 if hours < 0 else 1\n hours = abs(hours)\n offset = hours * 60 + minutes\n return pytz.FixedOffset(sign * offset)"
},
{
"identifier": "Variable",
"path": "bkflow_feel/parsers.py",
"snippet": "class Variable(Expression):\n def __init__(self, name):\n self.name = name\n\n def evaluate(self, context):\n return context.get(self.name)"
},
{
"identifier": "IsDefinedFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class IsDefinedFunc(CommonExpression):\n def evaluate(self, context):\n return self.value.evaluate(context) is not None"
},
{
"identifier": "GetOrElseFunc",
"path": "bkflow_feel/parsers.py",
"snippet": "class GetOrElseFunc(BinaryOperator):\n def evaluate(self, context):\n left_val = self.left.evaluate(context)\n right_val = self.right.evaluate(context)\n return left_val if left_val is not None else right_val"
}
] | from lark import Token, Transformer, v_args
from .data_models import RangeGroupOperator
from .parsers import (
AfterFunc,
And,
BeforeFunc,
Between,
Boolean,
Context,
ContextItem,
Date,
DateAndTime,
DayOfWeekFunc,
Expr,
FuncInvocation,
FunctionCall,
In,
IncludesFunc,
List,
ListEvery,
ListFilter,
ListItem,
ListOperator,
ListSome,
MonthOfYearFunc,
Not,
NotEqual,
NowFunc,
Null,
Number,
Or,
Pair,
RangeGroup,
SameTypeBinaryOperator,
String,
StringOperator,
Time,
TodayFunc,
ToString,
TZInfo,
Variable, IsDefinedFunc, GetOrElseFunc,
) | 4,372 | # -*- coding: utf-8 -*-
@v_args(inline=True)
class FEELTransformer(Transformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def number(self, token):
try:
value = int(token.value)
except ValueError:
value = float(token.value)
| # -*- coding: utf-8 -*-
@v_args(inline=True)
class FEELTransformer(Transformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def number(self, token):
try:
value = int(token.value)
except ValueError:
value = float(token.value) | return Number(value) | 27 | 2023-11-09 13:47:26+00:00 | 8k |
sivasurend/lyzr | lyzr/chatqa/chatbot.py | [
{
"identifier": "pdf_chat_",
"path": "lyzr/utils/chat_utils.py",
"snippet": "def pdf_chat_(\n input_dir: Optional[str] = None,\n input_files: Optional[List] = None,\n exclude_hidden: bool = True,\n filename_as_id: bool = True,\n recursive: bool = True,\n required_exts: Optional[List[str]] = None,\n system_prompt: str = None,\n query_wrapper_prompt: str = None,\n embed_model: Union[str, EmbedType] = \"default\",\n llm_params: dict = None,\n vector_store_params: dict = None,\n service_context_params: dict = None,\n chat_engine_params: dict = None,\n) -> BaseChatEngine:\n documents = read_pdf_as_documents(\n input_dir=input_dir,\n input_files=input_files,\n exclude_hidden=exclude_hidden,\n filename_as_id=filename_as_id,\n recursive=recursive,\n required_exts=required_exts,\n )\n\n llm_params = {} if llm_params is None else llm_params\n vector_store_params = (\n {\"vector_store_type\": \"LanceDBVectorStore\"}\n if vector_store_params is None\n else vector_store_params\n )\n service_context_params = (\n {} if service_context_params is None else service_context_params\n )\n chat_engine_params = {} if chat_engine_params is None else chat_engine_params\n\n llm = LyzrLLMFactory.from_defaults(**llm_params)\n service_context = LyzrService.from_defaults(\n llm=llm,\n embed_model=embed_model,\n system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt,\n **service_context_params,\n )\n\n vector_store_index = LyzrVectorStoreIndex.from_defaults(\n **vector_store_params, documents=documents, service_context=service_context\n )\n\n return vector_store_index.as_chat_engine(\n **chat_engine_params, chat_mode=ChatMode.CONTEXT, similarity_top_k=5\n )"
},
{
"identifier": "txt_chat_",
"path": "lyzr/utils/chat_utils.py",
"snippet": "def txt_chat_(\n input_dir: Optional[str] = None,\n input_files: Optional[List] = None,\n exclude_hidden: bool = True,\n filename_as_id: bool = True,\n recursive: bool = True,\n required_exts: Optional[List[str]] = None,\n system_prompt: str = None,\n query_wrapper_prompt: str = None,\n embed_model: Union[str, EmbedType] = \"default\",\n llm_params: dict = None,\n vector_store_params: dict = None,\n service_context_params: dict = None,\n chat_engine_params: dict = None,\n) -> BaseChatEngine:\n documents = read_txt_as_documents(\n input_dir=input_dir,\n input_files=input_files,\n exclude_hidden=exclude_hidden,\n filename_as_id=filename_as_id,\n recursive=recursive,\n required_exts=required_exts,\n )\n\n llm_params = {} if llm_params is None else llm_params\n vector_store_params = (\n {\"vector_store_type\": \"LanceDBVectorStore\"}\n if vector_store_params is None\n else vector_store_params\n )\n service_context_params = (\n {} if service_context_params is None else service_context_params\n )\n chat_engine_params = {} if chat_engine_params is None else chat_engine_params\n\n llm = LyzrLLMFactory.from_defaults(**llm_params)\n service_context = LyzrService.from_defaults(\n llm=llm,\n embed_model=embed_model,\n system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt,\n **service_context_params,\n )\n\n vector_store_index = LyzrVectorStoreIndex.from_defaults(\n **vector_store_params, documents=documents, service_context=service_context\n )\n\n return vector_store_index.as_chat_engine(\n **chat_engine_params, chat_mode=ChatMode.CONTEXT, similarity_top_k=5\n )"
},
{
"identifier": "docx_chat_",
"path": "lyzr/utils/chat_utils.py",
"snippet": "def docx_chat_(\n input_dir: Optional[str] = None,\n input_files: Optional[List] = None,\n exclude_hidden: bool = True,\n filename_as_id: bool = True,\n recursive: bool = True,\n required_exts: Optional[List[str]] = None,\n system_prompt: str = None,\n query_wrapper_prompt: str = None,\n embed_model: Union[str, EmbedType] = \"default\",\n llm_params: dict = None,\n vector_store_params: dict = None,\n service_context_params: dict = None,\n chat_engine_params: dict = None,\n) -> BaseChatEngine:\n documents = read_docx_as_documents(\n input_dir=input_dir,\n input_files=input_files,\n exclude_hidden=exclude_hidden,\n filename_as_id=filename_as_id,\n recursive=recursive,\n required_exts=required_exts,\n )\n\n llm_params = {} if llm_params is None else llm_params\n vector_store_params = (\n {\"vector_store_type\": \"LanceDBVectorStore\"}\n if vector_store_params is None\n else vector_store_params\n )\n service_context_params = (\n {} if service_context_params is None else service_context_params\n )\n chat_engine_params = {} if chat_engine_params is None else chat_engine_params\n\n llm = LyzrLLMFactory.from_defaults(**llm_params)\n service_context = LyzrService.from_defaults(\n llm=llm,\n embed_model=embed_model,\n system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt,\n **service_context_params,\n )\n\n vector_store_index = LyzrVectorStoreIndex.from_defaults(\n **vector_store_params, documents=documents, service_context=service_context\n )\n\n return vector_store_index.as_chat_engine(\n **chat_engine_params, chat_mode=ChatMode.CONTEXT, similarity_top_k=5\n )"
},
{
"identifier": "webpage_chat_",
"path": "lyzr/utils/chat_utils.py",
"snippet": "def webpage_chat_(\n url: str = None,\n system_prompt: str = None,\n query_wrapper_prompt: str = None,\n embed_model: Union[str, EmbedType] = \"default\",\n llm_params: dict = None,\n vector_store_params: dict = None,\n service_context_params: dict = None,\n chat_engine_params: dict = None,\n) -> BaseChatEngine:\n documents = read_webpage_as_documents(\n url=url,\n )\n\n llm_params = {} if llm_params is None else llm_params\n vector_store_params = (\n {\"vector_store_type\": \"LanceDBVectorStore\"}\n if vector_store_params is None\n else vector_store_params\n )\n service_context_params = (\n {} if service_context_params is None else service_context_params\n )\n chat_engine_params = {} if chat_engine_params is None else chat_engine_params\n\n llm = LyzrLLMFactory.from_defaults(**llm_params)\n service_context = LyzrService.from_defaults(\n llm=llm,\n embed_model=embed_model,\n system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt,\n **service_context_params,\n )\n\n vector_store_index = LyzrVectorStoreIndex.from_defaults(\n **vector_store_params, documents=documents, service_context=service_context\n )\n\n return vector_store_index.as_chat_engine(\n **chat_engine_params, chat_mode=ChatMode.CONTEXT, similarity_top_k=5\n )"
},
{
"identifier": "website_chat_",
"path": "lyzr/utils/chat_utils.py",
"snippet": "def website_chat_(\n url: str = None,\n system_prompt: str = None,\n query_wrapper_prompt: str = None,\n embed_model: Union[str, EmbedType] = \"default\",\n llm_params: dict = None,\n vector_store_params: dict = None,\n service_context_params: dict = None,\n chat_engine_params: dict = None,\n) -> BaseChatEngine:\n documents = read_website_as_documents(\n url=url,\n )\n\n llm_params = {} if llm_params is None else llm_params\n vector_store_params = (\n {\"vector_store_type\": \"LanceDBVectorStore\"}\n if vector_store_params is None\n else vector_store_params\n )\n service_context_params = (\n {} if service_context_params is None else service_context_params\n )\n chat_engine_params = {} if chat_engine_params is None else chat_engine_params\n\n llm = LyzrLLMFactory.from_defaults(**llm_params)\n service_context = LyzrService.from_defaults(\n llm=llm,\n embed_model=embed_model,\n system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt,\n **service_context_params,\n )\n\n vector_store_index = LyzrVectorStoreIndex.from_defaults(\n **vector_store_params, documents=documents, service_context=service_context\n )\n\n return vector_store_index.as_chat_engine(\n **chat_engine_params, chat_mode=ChatMode.CONTEXT, similarity_top_k=5\n )"
},
{
"identifier": "youtube_chat_",
"path": "lyzr/utils/chat_utils.py",
"snippet": "def youtube_chat_(\n urls: List[str] = None,\n system_prompt: str = None,\n query_wrapper_prompt: str = None,\n embed_model: Union[str, EmbedType] = \"default\",\n llm_params: dict = None,\n vector_store_params: dict = None,\n service_context_params: dict = None,\n chat_engine_params: dict = None,\n) -> BaseChatEngine:\n documents = read_youtube_as_documents(\n urls=urls,\n )\n\n llm_params = {} if llm_params is None else llm_params\n vector_store_params = (\n {\"vector_store_type\": \"LanceDBVectorStore\"}\n if vector_store_params is None\n else vector_store_params\n )\n service_context_params = (\n {} if service_context_params is None else service_context_params\n )\n chat_engine_params = {} if chat_engine_params is None else chat_engine_params\n\n llm = LyzrLLMFactory.from_defaults(**llm_params)\n service_context = LyzrService.from_defaults(\n llm=llm,\n embed_model=embed_model,\n system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt,\n **service_context_params,\n )\n\n vector_store_index = LyzrVectorStoreIndex.from_defaults(\n **vector_store_params, documents=documents, service_context=service_context\n )\n\n return vector_store_index.as_chat_engine(\n **chat_engine_params, chat_mode=ChatMode.CONTEXT, similarity_top_k=5\n )"
}
] | from typing import Union, Optional, List
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.chat_engine.types import BaseChatEngine
from llama_index.embeddings.utils import EmbedType
from lyzr.utils.chat_utils import (
pdf_chat_,
txt_chat_,
docx_chat_,
webpage_chat_,
website_chat_,
youtube_chat_,
) | 3,694 | query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return pdf_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def docx_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return docx_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def txt_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return txt_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def webpage_chat(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return webpage_chat_(
url=url,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def website_chat(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
|
class ChatBot:
def __init__(self) -> None:
return None
@staticmethod
def from_instances(
vector_store_index: VectorStoreIndex, service_context: ServiceContext, **kwargs
) -> BaseChatEngine:
return vector_store_index.as_chat_engine(
service_context=service_context, **kwargs
)
@staticmethod
def pdf_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return pdf_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def docx_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return docx_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def txt_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return txt_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def webpage_chat(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return webpage_chat_(
url=url,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def website_chat(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine: | return website_chat_( | 4 | 2023-11-07 14:52:08+00:00 | 8k |
siyuanseever/llama2Rnn.c | train.py | [
{
"identifier": "Transformer",
"path": "model.py",
"snippet": "class Transformer(nn.Module):\n last_loss: Optional[torch.Tensor]\n\n def __init__(self, params: ModelArgs):\n super().__init__()\n self.params = params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)\n self.dropout = nn.Dropout(params.dropout)\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TransformerBlock(layer_id, params))\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = nn.Linear(params.dim, params.vocab_size, bias=False)\n\n # share the unembedding parameters with the embedding parameters\n self.tok_embeddings.weight = self.output.weight # https://paperswithcode.com/method/weight-tying\n\n # some useful precompute for the RoPE relative positional embeddings\n k = self.params.extend_seq_len / self.params.max_seq_len\n # various sequence length extrapolation\n if \"extrapolation\" in self.params.extend_method:\n freqs_cos, freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.extend_seq_len)\n elif \"interpolation\" in self.params.extend_method:\n freqs_cos, freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.extend_seq_len, k = k)\n elif \"radix\" in self.params.extend_method:\n freqs_cos, freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.extend_seq_len, theta = 10000.0 * k)\n elif \"ntk\" in self.params.extend_method:\n freqs_cos, freqs_sin = ntk_freqs_cis(self.params.dim // self.params.n_heads, self.params.extend_seq_len, k=k)\n elif self.params.extend_method == \"rotate\":\n freqs_cos, freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.max_seq_len)\n if k > 1:\n freqs_cos = repeat_freqs(freqs_cos, int(k))\n freqs_sin = repeat_freqs(freqs_sin, int(k))\n elif \"PEClip\" in self.params.extend_method:\n freqs_cos, freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.max_seq_len)\n if k > 1:\n freqs_cos = repeat_freqs_clip(freqs_cos, int(k))\n freqs_sin = repeat_freqs_clip(freqs_sin, int(k))\n else:\n freqs_cos, freqs_sin = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.extend_seq_len)\n self.register_buffer(\"freqs_cos\", freqs_cos, persistent=False)\n self.register_buffer(\"freqs_sin\", freqs_sin, persistent=False)\n\n # init all weights\n self.apply(self._init_weights)\n # apply special scaled init to the residual projections, per GPT-2 paper\n for pn, p in self.named_parameters():\n if pn.endswith('w3.weight') or pn.endswith('wo.weight') or pn.endswith('wm.weight'):\n torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * params.n_layers))\n\n # Initialize attribute for the loss of the last forward call. 
This will be set if the forward is called with a targets tensor.\n self.last_loss = None\n self.last_acc = None\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n\n def _repeat_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n # 获取tokens的最后self.params.max_seq_len部分\n last_tokens = tokens[:, -self.params.max_seq_len:]\n # 重复last_tokens直到它和原始tokens的长度一样\n repeated_tokens = last_tokens.repeat(1, tokens.size(1) // last_tokens.size(1))\n return repeated_tokens\n\n def forward(self, \n tokens: torch.Tensor, targets: Optional[torch.Tensor] = None, \n eval_last: bool = False, repeat_tokens: bool = False,\n ) -> torch.Tensor:\n if self.params.extend_method == \"clip\":\n tokens = tokens[:, -self.params.max_seq_len:]\n targets = targets[:, -self.params.max_seq_len:]\n if repeat_tokens:\n tokens = self._repeat_tokens(tokens)\n targets = self._repeat_tokens(targets)\n\n bsz, seqlen = tokens.shape\n h = self.tok_embeddings(tokens)\n h = self.dropout(h)\n freqs_cos = self.freqs_cos[:seqlen]\n freqs_sin = self.freqs_sin[:seqlen]\n\n for layer in self.layers:\n h = layer(h, freqs_cos, freqs_sin)\n h = self.norm(h)\n\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h[:, -self.params.max_seq_len:, :])\n targets = targets[:, -self.params.max_seq_len:]\n if eval_last:\n logits = logits[:, [-1], :]\n targets = targets[:, [-1]]\n self.last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)), \n targets.reshape(-1),\n ignore_index=-1\n )\n\n _, predicts = torch.max(logits, -1)\n ignore_mask = targets != -1\n total_samples = ignore_mask.sum()\n self.last_acc = ((predicts == targets) & ignore_mask).sum().float() / total_samples.float()\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(h[:, [-1], :]) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):\n # start with all of the candidate parameters\n param_dict = {pn: p for pn, p in self.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. 
all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {'params': decay_params, 'weight_decay': weight_decay},\n {'params': nodecay_params, 'weight_decay': 0.0}\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n print(f\"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters\")\n print(f\"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters\")\n # Create AdamW optimizer and use the fused version if it is available\n fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters\n use_fused = fused_available and device_type == 'cuda'\n extra_args = dict(fused=True) if use_fused else dict()\n optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)\n print(f\"using fused AdamW: {use_fused}\")\n\n return optimizer\n\n def estimate_mfu(self, fwdbwd_per_iter, dt):\n \"\"\" estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS \"\"\"\n # first estimate the number of flops we do per iteration.\n # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311\n N = sum(p.numel() for p in self.parameters())\n cfg = self.params\n L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim//cfg.n_heads, cfg.max_seq_len\n flops_per_token = 6*N + 12*L*H*Q*T\n flops_per_fwdbwd = flops_per_token * T\n flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter\n # express our flops throughput as ratio of A100 bfloat16 peak flops\n flops_achieved = flops_per_iter * (1.0/dt) # per second\n flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS\n mfu = flops_achieved / flops_promised\n return mfu\n\n @torch.inference_mode()\n def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):\n \"\"\"\n Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete\n the sequence max_new_tokens times, feeding the predictions back into the model each time.\n Most likely you'll want to make sure to be in model.eval() mode of operation for this.\n Also note this is a super inefficient version of sampling with no key/value cache.\n \"\"\"\n for _ in range(max_new_tokens):\n # if the sequence context is growing too long we must crop it at block_size\n idx_cond = idx if idx.size(1) <= self.params.max_seq_len else idx[:, -self.params.max_seq_len:]\n # forward the model to get the logits for the index in the sequence\n logits = self(idx_cond)\n logits = logits[:, -1, :] # crop to just the final time step\n if temperature == 0.0:\n # \"sample\" the single most likely index\n _, idx_next = torch.topk(logits, k=1, dim=-1)\n else:\n # pluck the logits at the final step and scale by desired temperature\n logits = logits / temperature\n # optionally crop the logits to only the top k options\n if top_k is not None:\n v, _ = torch.topk(logits, min(top_k, logits.size(-1)))\n logits[logits < v[:, [-1]]] = -float('Inf')\n # apply softmax to convert logits to (normalized) probabilities\n probs = F.softmax(logits, dim=-1)\n idx_next = torch.multinomial(probs, num_samples=1)\n # append sampled index to the running sequence and continue\n idx = torch.cat((idx, idx_next), dim=1)\n\n return idx"
},
{
"identifier": "ModelArgs",
"path": "model.py",
"snippet": "class ModelArgs:\n # default hyperparameters for the Llama 7B model\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = 32000\n hidden_dim: Optional[int] = None\n multiple_of: int = 256 # MLP hidden layer size will be multiple of\n norm_eps: float = 1e-5\n max_seq_len: int = 2048\n extend_seq_len: int = 2048\n extend_method: str = \"extrapolation\"\n dropout: float = 0.0\n attention_type: str = \"attention\"\n memory_attention: bool = False\n memseqlen: int = 128\n do_wm: bool = False\n do_memory_ffn: bool = False\n memory_norm: bool = False\n train_orimem: bool = False\n reuse_kv: bool = False\n lora: bool = False\n update_memory: bool = False\n use_saved_mem: bool = False\n key_norm: bool = False"
}
] | import math
import os
import time
import torch
import wandb
from contextlib import nullcontext
from datetime import datetime
from functools import partial
from tqdm import tqdm
from model import Transformer, ModelArgs
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DistributedDataParallel as DDP
from tinystories import Task
from ultrachat import Task
from wikipedia_en import Task
from wiki_zh import Task
from wiki import Task
from zhihu import Task
from jiiov import Task
from datatask import Task
from dataset import Task
from data_generator import Task | 5,049 | dtype = "float32" # float32|bfloat16|float16
compile = True # use PyTorch 2.0 to compile the model to be faster
# test_model
test_model = False
# fixing some hyperparams to sensible defaults
lr_decay_iters = max_iters # should be ~= max_iters per Chinchilla
min_lr = 0.0 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
# -----------------------------------------------------------------------------
config_keys = [
k
for k, v in globals().items()
if not k.startswith("_") and isinstance(v, (int, float, bool, str))
]
exec(open("configurator.py").read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# model init
model_args = dict(
dim=dim,
n_layers=n_layers,
n_heads=n_heads,
n_kv_heads=n_kv_heads,
vocab_size=vocab_size,
multiple_of=multiple_of,
max_seq_len=max_seq_len,
extend_seq_len=max_seq_len,
extend_method=extend_method,
dropout=dropout,
attention_type=attention_type,
memseqlen=memseqlen,
do_wm=do_wm,
do_memory_ffn=do_memory_ffn,
memory_norm=memory_norm,
train_orimem=train_orimem,
reuse_kv=reuse_kv,
update_memory=update_memory,
use_saved_mem=bool(use_saved_mem),
key_norm=key_norm,
) # start with model_args from command line
# validating checks
assert vocab_source in ["llama2", "custom"]
assert vocab_source == "custom" or vocab_size == 32000, "The vocab from Meta has 32K tokens"
# various inits, derived attributes, I/O setup
ddp = int(os.environ.get("RANK", -1)) != -1 # is this a ddp run?
if ddp:
init_process_group(backend="nccl")
ddp_rank = int(os.environ["RANK"])
ddp_local_rank = int(os.environ["LOCAL_RANK"])
ddp_world_size = int(os.environ["WORLD_SIZE"])
device = f"cuda:{ddp_local_rank}"
torch.cuda.set_device(device)
master_process = ddp_rank == 0 # this process will do logging, checkpointing etc.
seed_offset = ddp_rank # each process gets a different seed
# world_size number of processes will be training simultaneously, so we can scale
# down the desired gradient accumulation iterations per process proportionally
assert gradient_accumulation_steps % ddp_world_size == 0
gradient_accumulation_steps //= ddp_world_size
else:
# if not ddp, we are running on a single gpu, and one process
master_process = True
seed_offset = 0
ddp_world_size = 1
tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * max_seq_len
if master_process:
print(f"tokens per iteration will be: {tokens_per_iter:,}")
print(f"breaks down as: {gradient_accumulation_steps} grad accum steps * {ddp_world_size} processes * {batch_size} batch size * {max_seq_len} max seq len")
if master_process:
os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = "cuda" if "cuda" in device else "cpu" # for later use in torch.autocast
# note: float16 data type will automatically use a GradScaler
ptdtype = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16}[dtype]
ctx = (
nullcontext()
if device_type == "cpu"
else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
)
# task-specific setup
num_workers = os.cpu_count() // ddp_world_size - 1
num_workers = 0
print(f'task num workers = {num_workers}')
task_args = dict(
batch_size=batch_size,
max_seq_len=max_seq_len,
vocab_size=vocab_size,
vocab_source=vocab_source,
device=device,
num_workers = num_workers,
)
if task_name == 'tinystories':
elif task_name == 'ultrachat':
elif task_name == 'wikipedia_en':
elif task_name == 'wiki_zh':
elif task_name == 'wiki':
elif task_name == 'zhihu':
elif task_name == 'jiiov':
elif task_name.startswith('all'):
task_args["tasks"] = tasks
elif task_name.startswith('ds_'):
tasks = task_name[len('ds_'):].split('_')
task_args["tasks"] = tasks
elif task_name.startswith('dg_'):
tasks = task_name[len('dg_'):].split('_')
task_args["tasks"] = tasks
iter_batches = partial(Task.iter_batches, **task_args)
# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
iter_num = 0
best_val_loss = 1e9
if init_from == "scratch":
# init a new model from scratch
print("Initializing a new model from scratch")
gptconf = ModelArgs(**model_args)
| """
This training script can be run both on a single gpu in debug mode,
and also in a larger training run with distributed data parallel (ddp).
To run on a single GPU small debug run, example:
$ python -m train.py --compile=False --eval_iters=10 --batch_size=8
To run with DDP on 4 gpus on 1 node, example:
$ torchrun --standalone --nproc_per_node=4 train.py
To run with DDP on 4 gpus across 2 nodes, example:
- Run on the first (master) node with example IP 123.456.123.456:
$ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=123.456.123.456 --master_port=1234 train.py
- Run on the worker node:
$ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=123.456.123.456 --master_port=1234 train.py
(If your cluster does not have Infiniband interconnect prepend NCCL_IB_DISABLE=1)
"""
# -----------------------------------------------------------------------------
# I/O
out_dir = "out"
eval_interval = 2000
log_interval = 1
eval_iters = 100
eval_only = False # if True, script exits right after the first eval
eval_last = False
repeat_tokens = False
always_save_checkpoint = False # if True, always save a checkpoint after each eval
init_from = "scratch" # 'scratch' or 'resume'
# wandb logging
wandb_log = False # disabled by default
wandb_project = "llamac"
wandb_run_name = "run" + datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
# data
tasks = []
task_name = "tinystories"
batch_size = 128 # if gradient_accumulation_steps > 1, this is the micro-batch size
max_seq_len = 256
extend_method = "extrapolation"
vocab_source = "llama2" # llama2|custom; use Llama 2 vocab from Meta, or custom trained
vocab_size = 32000 # the Llama 2 tokenizer has 32K tokens
# model
dim = 288
n_layers = 6
n_heads = 6
n_kv_heads = 6
multiple_of = 32
dropout = 0.0
# extrapolation
key_norm = False
# memory
attention_type = "attention"
memseqlen = 128
do_wm = False
do_memory_ffn = False
memory_norm = False
train_orimem = False
reuse_kv = False
save_memory = ""
update_memory = False
use_saved_mem = ""
# adamw optimizer
gradient_accumulation_steps = 4 # used to simulate larger batch sizes
learning_rate = 5e-4 # max learning rate
max_iters = 100000 # total number of training iterations
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0 # clip gradients at this value, or disable if == 0.0
# learning rate decay settings
decay_lr = True # whether to decay the learning rate
warmup_iters = 1000 # how many steps to warm up for
# system
# device = "cuda" # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = "float32" # float32|bfloat16|float16
compile = True # use PyTorch 2.0 to compile the model to be faster
# test_model
test_model = False
# fixing some hyperparams to sensible defaults
lr_decay_iters = max_iters # should be ~= max_iters per Chinchilla
min_lr = 0.0 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
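# Added sketch (assumed, not shown in this excerpt): these knobs usually drive a
# linear-warmup + cosine-decay schedule roughly like the following —
#   def get_lr(it):
#       if it < warmup_iters:                       # linear warmup
#           return learning_rate * it / warmup_iters
#       if it > lr_decay_iters:                     # after decay, hold at min_lr
#           return min_lr
#       decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
#       coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # goes 1 -> 0
#       return min_lr + coeff * (learning_rate - min_lr)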
# -----------------------------------------------------------------------------
config_keys = [
k
for k, v in globals().items()
if not k.startswith("_") and isinstance(v, (int, float, bool, str))
]
exec(open("configurator.py").read()) # overrides from command line or config file
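# Added note (assumed behaviour of configurator.py, which is not shown here): overrides
# are typically passed as --key=value flags, e.g. `python train.py --batch_size=32
# --max_iters=20000`, rewriting the module-level defaults above before the snapshot below.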
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# model init
model_args = dict(
dim=dim,
n_layers=n_layers,
n_heads=n_heads,
n_kv_heads=n_kv_heads,
vocab_size=vocab_size,
multiple_of=multiple_of,
max_seq_len=max_seq_len,
extend_seq_len=max_seq_len,
extend_method=extend_method,
dropout=dropout,
attention_type=attention_type,
memseqlen=memseqlen,
do_wm=do_wm,
do_memory_ffn=do_memory_ffn,
memory_norm=memory_norm,
train_orimem=train_orimem,
reuse_kv=reuse_kv,
update_memory=update_memory,
use_saved_mem=bool(use_saved_mem),
key_norm=key_norm,
) # start with model_args from command line
# validating checks
assert vocab_source in ["llama2", "custom"]
assert vocab_source == "custom" or vocab_size == 32000, "The vocab from Meta has 32K tokens"
# various inits, derived attributes, I/O setup
ddp = int(os.environ.get("RANK", -1)) != -1 # is this a ddp run?
if ddp:
init_process_group(backend="nccl")
ddp_rank = int(os.environ["RANK"])
ddp_local_rank = int(os.environ["LOCAL_RANK"])
ddp_world_size = int(os.environ["WORLD_SIZE"])
device = f"cuda:{ddp_local_rank}"
torch.cuda.set_device(device)
master_process = ddp_rank == 0 # this process will do logging, checkpointing etc.
seed_offset = ddp_rank # each process gets a different seed
# world_size number of processes will be training simultaneously, so we can scale
# down the desired gradient accumulation iterations per process proportionally
assert gradient_accumulation_steps % ddp_world_size == 0
gradient_accumulation_steps //= ddp_world_size
else:
# if not ddp, we are running on a single gpu, and one process
master_process = True
seed_offset = 0
ddp_world_size = 1
tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * max_seq_len
if master_process:
print(f"tokens per iteration will be: {tokens_per_iter:,}")
print(f"breaks down as: {gradient_accumulation_steps} grad accum steps * {ddp_world_size} processes * {batch_size} batch size * {max_seq_len} max seq len")
if master_process:
os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = "cuda" if "cuda" in device else "cpu" # for later use in torch.autocast
# note: float16 data type will automatically use a GradScaler
ptdtype = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16}[dtype]
ctx = (
nullcontext()
if device_type == "cpu"
else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
)
# task-specific setup
num_workers = os.cpu_count() // ddp_world_size - 1
num_workers = 0  # note: overrides the value computed on the previous line
print(f'task num workers = {num_workers}')
task_args = dict(
batch_size=batch_size,
max_seq_len=max_seq_len,
vocab_size=vocab_size,
vocab_source=vocab_source,
device=device,
num_workers = num_workers,
)
if task_name == 'tinystories':
    pass  # branch body elided in this excerpt
elif task_name == 'ultrachat':
    pass  # branch body elided in this excerpt
elif task_name == 'wikipedia_en':
    pass  # branch body elided in this excerpt
elif task_name == 'wiki_zh':
    pass  # branch body elided in this excerpt
elif task_name == 'wiki':
    pass  # branch body elided in this excerpt
elif task_name == 'zhihu':
    pass  # branch body elided in this excerpt
elif task_name == 'jiiov':
    pass  # branch body elided in this excerpt
elif task_name.startswith('all'):
task_args["tasks"] = tasks
elif task_name.startswith('ds_'):
tasks = task_name[len('ds_'):].split('_')
task_args["tasks"] = tasks
elif task_name.startswith('dg_'):
tasks = task_name[len('dg_'):].split('_')
task_args["tasks"] = tasks
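# Illustrative example (added): a task_name such as "ds_wiki_zhihu" takes the 'ds_'
# branch above and yields tasks == ['wiki', 'zhihu'], which is forwarded via task_args.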
iter_batches = partial(Task.iter_batches, **task_args)
# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
iter_num = 0
best_val_loss = 1e9
if init_from == "scratch":
# init a new model from scratch
print("Initializing a new model from scratch")
gptconf = ModelArgs(**model_args) | model = Transformer(gptconf) | 0 | 2023-11-07 09:36:35+00:00 | 8k |
WolfgangFahl/dcm | dcm/dcm_assessment.py | [
{
"identifier": "Achievement",
"path": "dcm/dcm_core.py",
"snippet": "class Achievement:\n \"\"\"\n Class representing an individual's achievement level for a specific competence facet.\n\n Attributes:\n path (str): The path in the CompetenceTree, used to derive tree_id, aspect_id, and facet_id.\n level (int): The achieved level for this facet.\n score (float): How well the achievement was reached.\n score_unit (str): Unit of the score, default is \"%\".\n evidence (Optional[str]): Optional evidence supporting the achievement.\n date_assessed (Optional[str]): Optional date when the achievement was assessed (ISO-Format).\n \"\"\"\n\n path: str\n level: int = None\n score: float = None\n score_unit: Optional[str] = \"%\"\n evidence: Optional[str] = None\n date_assessed_iso: Optional[str] = None\n\n @property\n def tree_id(self):\n parts = self.path.split(\"/\")\n return parts[0] if parts else None\n\n @property\n def aspect_id(self):\n parts = self.path.split(\"/\")\n return parts[1] if len(parts) > 1 else None\n\n @property\n def area_id(self):\n parts = self.path.split(\"/\")\n return parts[2] if len(parts) > 2 else None\n\n @property\n def facet_id(self):\n parts = self.path.split(\"/\")\n return parts[3] if len(parts) > 3 else None"
},
{
"identifier": "CompetenceFacet",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceFacet(CompetenceElement):\n \"\"\"\n Represents a specific facet of a competence aspect, inheriting from CompetenceElement.\n\n This class can include additional properties or methods specific to a competence facet.\n \"\"\""
},
{
"identifier": "CompetenceArea",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceArea(CompetenceElement):\n \"\"\"\n Represents a specific area within a competence aspect, containing various facets.\n\n Attributes:\n facets (List[CompetenceFacet]): A list of CompetenceFacet objects representing individual facets of this area.\n \"\"\"\n facets: List[CompetenceFacet] = field(default_factory=list)"
},
{
"identifier": "CompetenceTree",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceTree(CompetenceElement, YamlAble[\"CompetenceTree\"]):\n \"\"\"\n Represents the entire structure of competencies, including various aspects and levels.\n\n Attributes:\n competence_aspects (List[CompetenceAspect]): A list of CompetenceAspect objects.\n competence_levels (List[CompetenceLevel]): A list of CompetenceLevel objects representing the different levels in the competence hierarchy.\n element_names (Dict[str, str]): A dictionary holding the names for tree, aspects, facets, and levels. The key is the type (\"tree\", \"aspect\", \"facet\", \"level\").\n \"\"\"\n\n lookup_url: Optional[str] = None\n aspects: List[CompetenceAspect] = field(default_factory=list)\n levels: List[CompetenceLevel] = field(default_factory=list)\n element_names: Dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n \"\"\"\n initalize the path variables of my hierarchy\n \"\"\"\n super().__post_init__()\n self.path = self.id\n # Loop through each competence aspect and set their paths and parent references\n for aspect in self.aspects:\n aspect.competence_tree = self\n aspect.path = f\"{self.id}/{aspect.id}\"\n for area in aspect.areas:\n area.competence_tree = self\n area.aspect = aspect\n area.path = f\"{self.id}/{aspect.id}/{area.id}\"\n for facet in area.facets:\n facet.competence_tree = self\n facet.area = area\n facet.path = f\"{self.id}/{aspect.id}/{area.id}/{facet.id}\"\n\n @classmethod\n def required_keys(cls) -> Tuple:\n keys = {\"name\", \"id\", \"url\", \"description\", \"element_names\"}\n return keys\n\n def lookup_by_path(\n self, path: str, lenient: bool = True\n ) -> Optional[CompetenceElement]:\n \"\"\"\n Look up and return a competence element (tree,aspect of facet)\n based on the given path.\n\n The path is expected to be in the format \"tree_id/aspect_id/facet_id\".\n This method parses the path and retrieves the corresponding competence aspect or facet.\n\n Args:\n path (str): The path in the format \"tree_id/aspect_id/facet_id\".\n\n lenient(bool): if not lenient raise Exceptions for invalid paths and ids\n Returns:\n Optional[CompetenceElement]: The competence aspect or facet corresponding to the given path.\n \"\"\"\n\n def handle_error(msg):\n if not lenient:\n raise ValueError(msg)\n\n parts = path.split(\"/\")\n if len(parts) < 1:\n return None\n\n tree_id = parts[0]\n if tree_id != self.id:\n handle_error(f\"invalid tree_id for lookup {tree_id}\")\n return None\n if len(parts) == 1:\n return self\n if len(parts) > 1:\n aspect_id = parts[1]\n # Retrieve the aspect\n aspect = next((aspect for aspect in self.aspects if aspect.id==aspect_id), None)\n if aspect:\n if len(parts) == 2:\n return aspect\n if len(parts) > 2:\n area_id = parts[2]\n area = next((area for area in aspect.areas if area.id == area_id), None)\n if area:\n if len(parts) == 3:\n return area\n if len(parts) > 3:\n facet_id = parts[3]\n facet = next(\n (facet for facet in area.facets if facet.id == facet_id), None\n )\n if facet:\n return facet\n handle_error(f\"invalid path for lookup {path}\")\n return None\n\n def to_pretty_json(self):\n \"\"\"\n Converts the CompetenceTree object to a pretty JSON string, handling null values.\n \"\"\"\n json_str = self.to_json()\n json_dict = json.loads(json_str)\n\n def remove_none_values(data):\n \"\"\"\n Recursively removes keys with None values from a dictionary, list, or nested structure.\n \"\"\"\n if isinstance(data, dict):\n return {\n k: remove_none_values(v) for k, v in data.items() if v is not None\n }\n elif 
isinstance(data, list):\n return [remove_none_values(item) for item in data]\n return data\n\n none_free_dict = remove_none_values(json_dict)\n null_free_json_str = json.dumps(none_free_dict, indent=2)\n return null_free_json_str\n\n def add_legend(self, svg: SVG) -> None:\n \"\"\"\n Add a legend to the SVG explaining the color codes for levels and aspects.\n Args:\n svg (SVG): The SVG object to which the legend will be added.\n \"\"\"\n # Starting x position for the legends, starting 10 pixels from the left edge\n x_start = 10\n # y position for the legends, starting 20 pixels from the bottom edge\n y = svg.config.total_height - svg.config.legend_height + 20\n # Width and height of each legend color box\n box_width, box_height = 30, 20\n # Padding between legend items and between the color box and the text\n padding = 5\n\n # Add the competence level legend\n level_items = [(level.color_code, level.name) for level in self.levels]\n svg.add_legend_column(\n level_items,\n self.element_names.get(\"level\", \"Level\"),\n x_start,\n y,\n box_width,\n box_height,\n )\n\n # Calculate the x position for the aspect legend based on the width of the level legend\n x_aspect_start = (\n x_start\n + box_width\n + padding\n + max(svg.get_text_width(level.name) for level in self.levels)\n + padding\n )\n\n # Add the competence aspect legend\n aspect_items = [(aspect.color_code, aspect.name) for aspect in self.aspects]\n svg.add_legend_column(\n aspect_items,\n self.element_names.get(\"aspect\", \"Aspect\"),\n x_aspect_start,\n y,\n box_width,\n box_height,\n )"
},
{
"identifier": "DynamicCompetenceMap",
"path": "dcm/dcm_core.py",
"snippet": "class DynamicCompetenceMap:\n \"\"\"\n a visualization of a competence map\n \"\"\"\n\n def __init__(self, competence_tree: CompetenceTree):\n \"\"\"\n constructor\n \"\"\"\n self.competence_tree = competence_tree\n self.svg = None\n\n @property\n def main_id(self):\n main_id = self.competence_tree.id\n return main_id\n\n @classmethod\n def examples_path(cls) -> str:\n # the root directory (default: examples)\n path = os.path.join(os.path.dirname(__file__), \"../dcm_examples\")\n path = os.path.abspath(path)\n return path\n\n @classmethod\n def get_example_dcm_definitions(\n cls,\n markup: str = \"json\",\n required_keys: Optional[Tuple] = None,\n as_text: bool = True,\n ) -> dict:\n \"\"\"\n Retrieve example Dynamic Competence Map (DCM) definitions from files in the specified markup format (either JSON or YAML).\n\n Args:\n markup (str): The markup format of the input files. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n required_keys (Optional[Tuple]): A tuple of keys required to validate the data. If not provided, all keys will be considered valid.\n as_text (bool): If True, returns the file content as text; if False, returns parsed data. Defaults to True.\n\n Returns:\n dict: A dictionary where each key is the prefix of the file name and the value is the file content as text or parsed data, depending on the value of 'as_text'.\n\n Raises:\n Exception: If there's an error in reading or parsing the file, or if the file does not meet the required validation criteria.\n \"\"\"\n example_dcm_defs = {}\n file_ext = f\".{markup}\"\n examples_path = cls.examples_path()\n for dirpath, _dirnames, filenames in os.walk(examples_path):\n for filename in filenames:\n if filename.endswith(file_ext):\n filepath = os.path.join(dirpath, filename)\n with open(filepath, \"r\") as definition_file:\n file_prefix = filename.replace(file_ext, \"\")\n definition_text = definition_file.read()\n try:\n definition_data = cls.parse_markup(definition_text, markup)\n if cls.is_valid_definition(definition_data, required_keys):\n if as_text:\n example_dcm_defs[file_prefix] = definition_text\n else:\n example_dcm_defs[file_prefix] = definition_data\n except Exception as ex:\n cls.handle_markup_issue(\n filename, definition_text, ex, markup\n )\n return example_dcm_defs\n\n @classmethod\n def parse_markup(cls, text: str, markup: str) -> Union[dict, list]:\n \"\"\"\n Parse the given text as JSON or YAML based on the specified markup type.\n\n Args:\n text (str): The string content to be parsed.\n markup (str): The type of markup to use for parsing. 
Supported values are 'json' and 'yaml'.\n\n Returns:\n Union[dict, list]: The parsed data, which can be either a dictionary or a list, depending on the content.\n\n Raises:\n ValueError: If an unsupported markup format is specified.\n \"\"\"\n if markup == \"json\":\n data=json.loads(text)\n return data\n elif markup == \"yaml\":\n data=yaml.safe_load(text)\n return data\n else:\n raise ValueError(f\"Unsupported markup format: {markup}\")\n\n @classmethod\n def handle_markup_issue(cls, name: str, definition_string: str, ex, markup: str):\n if isinstance(ex, JSONDecodeError):\n lines = definition_string.splitlines() # Split the string into lines\n err_line = lines[ex.lineno - 1] # JSONDecodeError gives 1-based lineno\n pointer = (\n \" \" * (ex.colno - 1) + \"^\"\n ) # Create a pointer string to indicate the error position\n error_message = (\n f\"{name}:JSON parsing error on line {ex.lineno} column {ex.colno}:\\n\"\n f\"{err_line}\\n\"\n f\"{pointer}\\n\"\n f\"{ex.msg}\"\n )\n raise ValueError(error_message) # Raise a new exception with this message\n else:\n error_message = f\"error in {name}: {str(ex)}\"\n raise ValueError(error_message)\n\n @classmethod\n def is_valid_definition(cls, definition_data, required_keys: Tuple):\n return all(key in definition_data for key in required_keys)\n\n @classmethod\n def get_examples(cls, content_class=CompetenceTree, markup: str = \"json\") -> dict:\n examples = {}\n for name, definition_string in cls.get_example_dcm_definitions(\n required_keys=content_class.required_keys(), markup=markup\n ).items():\n example = cls.from_definition_string(\n name, definition_string, content_class, markup=markup\n )\n # check the type of the example\n example_id = example.main_id\n examples[example_id] = example\n return examples\n\n @classmethod\n def from_definition_string(\n cls, name: str, definition_string: str, content_class, markup: str = \"json\"\n ) -> Any:\n \"\"\"\n Load a DynamicCompetenceMap or Learner instance from a definition string (either JSON or YAML).\n\n Args:\n name (str): A name identifier for the data source.\n definition_string (str): The string content of the definition.\n content_class (dataclass_json): The class which will be instantiated with the parsed data.\n markup (str): The markup format of the data. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n\n Returns:\n DynamicCompetenceMap: An instance of DynamicCompetenceMap loaded with the parsed data.\n\n Raises:\n ValueError: If there's an error in parsing the data.\n \"\"\"\n try:\n data = cls.parse_markup(definition_string, markup)\n content = content_class.from_dict(data)\n if isinstance(content, CompetenceTree):\n return DynamicCompetenceMap(content)\n else:\n return content\n except Exception as ex:\n cls.handle_markup_issue(name, definition_string, ex, markup)"
},
{
"identifier": "Learner",
"path": "dcm/dcm_core.py",
"snippet": "class Learner:\n \"\"\"\n A learner with achievements.\n Attributes:\n learner_id (str): Identifier for the learner.\n achievements (Dict[str, List[Achievement]]):\n A dictionary where each key is a competence element identifier\n and the value is a list of Achievement instances for that tree.\n \"\"\"\n\n learner_id: str\n achievements: Optional[List[Achievement]] = field(default=None)\n\n def __post_init__(self):\n self.achievements_by_path = {}\n if self.achievements:\n for achievement in self.achievements:\n self.achievements_by_path[achievement.path] = achievement\n\n @classmethod\n def required_keys(cls):\n keys = {\"achievements\"}\n return keys\n\n @property\n def main_id(self):\n main_id = self.learner_id\n return main_id\n\n def add_achievement(self, new_achievement):\n self.achievements.append(new_achievement)\n self.achievements_by_path[new_achievement.path] = new_achievement\n\n def get_competence_tree_ids(self) -> List[str]:\n \"\"\"\n Get all unique competence tree IDs of my achievements.\n\n Returns:\n List[str]: A list of unique competence tree IDs.\n \"\"\"\n # Assuming that the learner's achievements are stored in a list called self.achievements\n # You can modify this part according to your actual data structure.\n\n # Create a set to store unique competence tree IDs\n unique_tree_ids = set()\n\n # Iterate through the learner's achievements\n for achievement in self.achievements:\n # Assuming each achievement has a tree_id attribute\n tree_id = achievement.tree_id\n\n # Add the tree_id to the set\n unique_tree_ids.add(tree_id)\n\n # Convert the set to a list and return\n return list(unique_tree_ids)"
}
] | from ngwidgets.progress import NiceguiProgressbar
from ngwidgets.webserver import NiceGuiWebserver
from ngwidgets.widgets import Link
from nicegui import ui
from dcm.dcm_core import (
Achievement,
CompetenceFacet,
CompetenceArea,
CompetenceTree,
DynamicCompetenceMap,
Learner,
) | 5,801 | self.learner = learner
self.achievement_index = 0
        # do we need to set up the achievements?
if self.learner.achievements is None:
self.learner.achievements = []
self.setup_achievements()
self.total = len(self.learner.achievements)
def clear(self):
"""
clear the ui
"""
self.container.clear()
@property
def current_achievement(self) -> Achievement:
        if self.achievement_index < 0 or self.achievement_index >= len(
self.learner.achievements
):
raise ValueError(f"invalid achievement index {self.achievement_index}")
achievement = self.learner.achievements[self.achievement_index]
return achievement
def setup_achievements(self):
"""
Setup achievements based on the competence tree.
This method iterates over the competence aspects and their facets,
constructs a path for each facet, and creates an Achievement instance
based on the path. These achievements are then added to the learner's
achievements list.
"""
for aspect in self.competence_tree.aspects:
for area in aspect.areas:
area_path: str = f"{self.competence_tree.id}/{aspect.id}"
self.add_achievement(area_path)
for facet in area.facets:
# Construct the path for the facet
facet_path=f"{area_path}/{facet.id}"
self.add_achievement(facet_path)
def add_achievement(self,path):
# Create a new Achievement instance with the constructed path
new_achievement = Achievement(
path=path,
)
self.learner.add_achievement(new_achievement)
def get_index_str(self) -> str:
index_str = f"{self.achievement_index+1:2}/{self.total:2}"
return index_str
def setup_ui(self):
"""
display my competence Tree elements
"""
with ui.grid(columns=1).classes("w-full") as self.container:
self.progress_bar = NiceguiProgressbar(
total=self.total, desc="self assessment", unit="facets"
)
self.progress_bar.reset()
with ui.row():
ui.button("", icon="arrow_back", on_click=lambda _args: self.step(-1))
ui.button("", icon="arrow_forward", on_click=lambda _args: self.step(1))
with ui.row():
with ui.card() as self.achievement_view:
self.index_view = ui.label(self.get_index_str())
self.link_view = ui.html()
self.markdown_view = ui.markdown()
self.button_row = ButtonRow(
self, self.competence_tree, self.current_achievement
)
def show_progress(self):
"""
Update the progress bar based on the
number of achievements with a non-None level value.
"""
count = sum(
1
for achievement in self.learner.achievements
if achievement.level is not None
)
self.progress_bar.total = self.total
self.progress_bar.update_value(count)
async def step(self, step: int = 0):
self.update_achievement_view(step)
def update_achievement_view(self, step: int = 0):
"""
display the active achievement as the step indicates
"""
self.show_progress()
self.webserver.render_dcm(self.dcm, self.learner, clear_assessment=False)
if self.achievement_index + step < 0:
ui.notify("first achievement reached!")
step = 0
if self.achievement_index + step < len(self.learner.achievements):
self.achievement_index += step
self.index_view.text = self.get_index_str()
achievement = self.current_achievement
self.button_row.achievement = achievement
self.button_row.set_button_states(achievement)
competence_element = self.competence_tree.lookup_by_path(achievement.path)
if not competence_element:
            ui.notify(f"invalid path: {achievement.path}")
self.markdown_view.content = f"⚠️ {achievement.path}"
else:
if hasattr(competence_element, "path"):
if competence_element.url:
link = Link.create(
competence_element.url, competence_element.path
)
else:
link = competence_element.path
else:
link = "⚠️ - competence element path missing"
self.link_view.content = link
description = competence_element.description or ""
| """
Created on 2024-01-10
@author: wf
"""
class ButtonRow:
"""
A button row for selecting competence levels
to document achievements from a CompetenceTree.
"""
def __init__(
self,
assessment: "Assessment",
competence_tree: CompetenceTree,
achievement: Achievement = None,
):
"""
Construct a button row for the competence levels of the given CompetenceTree.
Args:
assessment (Assessment): The Assessment instance.
competence_tree (CompetenceTree): The Competence Tree to display buttons for.
achievement (Achievement): The current achievement of the learner.
"""
self.assessment = assessment
self.competence_tree = competence_tree
self.achievement = achievement
self.setup_buttons()
self.set_button_states(achievement)
def setup_buttons(self):
"""
Create buttons for each competence level defined in the CompetenceTree.
"""
self.buttons = {}
with ui.row() as self.row:
for level in self.competence_tree.levels:
button = ui.button(
level.name,
icon=level.icon,
color=level.color_code,
on_click=lambda _args, l=level.level: self.handle_selection(l),
).tooltip(level.description)
self.buttons[level.level] = button
def set_button_states(self, achievement: Achievement):
"""
Set the state of buttons based on the given achievement.
Args:
achievement (Achievement): The current achievement of the learner.
"""
# If no achievement or level is set, enable all buttons
if achievement is None or achievement.level is None:
for button in self.buttons.values():
button.enable()
button.visible = True
else:
# Enable only the button corresponding to the current level and disable others
for level, button in self.buttons.items():
if level == achievement.level:
button.enable()
button.visible = True
else:
button.disable()
button.visible = False
def handle_selection(self, selected_level: int):
"""
handle the selected level
Args:
selected_level(int): the selected level
"""
# Check if the same level is selected again,
# then reset the selection
if self.achievement.level == selected_level:
self.achievement.level = None
else:
self.achievement.level = selected_level
self.set_button_states(self.achievement)
# refresh the ui
self.row.update()
# show achievement_view
step = 1 if self.achievement.level else 0
self.assessment.update_achievement_view(step)
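        # Note (added commentary): choosing the same level twice clears the selection,
        # and `step` is 1 only when a level is set, so the assessment view advances
        # automatically after a real selection.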
class Assessment:
"""
Assessment for CompetenceTree
"""
def __init__(
self,
webserver: NiceGuiWebserver,
dcm: DynamicCompetenceMap,
learner: Learner,
debug: bool = False,
):
"""
initialize the assessment
Args:
webserver(NiceguiWebServer): the webserver context
dcm(DynamicCompetenceMap): the competence map
learner(Learner): the learner to get the self assessment for
debug(bool): if True show debugging information
"""
self.webserver = webserver
self.debug = debug
self.reset(dcm=dcm, learner=learner)
self.setup_ui()
def reset(
self,
dcm: DynamicCompetenceMap,
learner: Learner,
):
"""
(re)set the assessment
Args:
webserver(NiceguiWebServer): the webserver context
dcm(DynamicCompetenceMap): the competence map
learner(Learner): the learner to get the self assessment for
"""
self.dcm = dcm
self.competence_tree = dcm.competence_tree
self.learner = learner
self.achievement_index = 0
        # do we need to set up the achievements?
if self.learner.achievements is None:
self.learner.achievements = []
self.setup_achievements()
self.total = len(self.learner.achievements)
def clear(self):
"""
clear the ui
"""
self.container.clear()
@property
def current_achievement(self) -> Achievement:
        if self.achievement_index < 0 or self.achievement_index >= len(
self.learner.achievements
):
raise ValueError(f"invalid achievement index {self.achievement_index}")
achievement = self.learner.achievements[self.achievement_index]
return achievement
def setup_achievements(self):
"""
Setup achievements based on the competence tree.
This method iterates over the competence aspects and their facets,
constructs a path for each facet, and creates an Achievement instance
based on the path. These achievements are then added to the learner's
achievements list.
"""
for aspect in self.competence_tree.aspects:
for area in aspect.areas:
area_path: str = f"{self.competence_tree.id}/{aspect.id}"
self.add_achievement(area_path)
for facet in area.facets:
# Construct the path for the facet
facet_path=f"{area_path}/{facet.id}"
self.add_achievement(facet_path)
def add_achievement(self,path):
# Create a new Achievement instance with the constructed path
new_achievement = Achievement(
path=path,
)
self.learner.add_achievement(new_achievement)
def get_index_str(self) -> str:
index_str = f"{self.achievement_index+1:2}/{self.total:2}"
return index_str
def setup_ui(self):
"""
display my competence Tree elements
"""
with ui.grid(columns=1).classes("w-full") as self.container:
self.progress_bar = NiceguiProgressbar(
total=self.total, desc="self assessment", unit="facets"
)
self.progress_bar.reset()
with ui.row():
ui.button("", icon="arrow_back", on_click=lambda _args: self.step(-1))
ui.button("", icon="arrow_forward", on_click=lambda _args: self.step(1))
with ui.row():
with ui.card() as self.achievement_view:
self.index_view = ui.label(self.get_index_str())
self.link_view = ui.html()
self.markdown_view = ui.markdown()
self.button_row = ButtonRow(
self, self.competence_tree, self.current_achievement
)
def show_progress(self):
"""
Update the progress bar based on the
number of achievements with a non-None level value.
"""
count = sum(
1
for achievement in self.learner.achievements
if achievement.level is not None
)
self.progress_bar.total = self.total
self.progress_bar.update_value(count)
async def step(self, step: int = 0):
self.update_achievement_view(step)
def update_achievement_view(self, step: int = 0):
"""
display the active achievement as the step indicates
"""
self.show_progress()
self.webserver.render_dcm(self.dcm, self.learner, clear_assessment=False)
if self.achievement_index + step < 0:
ui.notify("first achievement reached!")
step = 0
if self.achievement_index + step < len(self.learner.achievements):
self.achievement_index += step
self.index_view.text = self.get_index_str()
achievement = self.current_achievement
self.button_row.achievement = achievement
self.button_row.set_button_states(achievement)
competence_element = self.competence_tree.lookup_by_path(achievement.path)
if not competence_element:
            ui.notify(f"invalid path: {achievement.path}")
self.markdown_view.content = f"⚠️ {achievement.path}"
else:
if hasattr(competence_element, "path"):
if competence_element.url:
link = Link.create(
competence_element.url, competence_element.path
)
else:
link = competence_element.path
else:
link = "⚠️ - competence element path missing"
self.link_view.content = link
description = competence_element.description or "" | if isinstance(competence_element, CompetenceArea): | 2 | 2023-11-06 09:24:24+00:00 | 8k |
weiwei-cool/FanQieNovelDownloadOnWeb | Api/views.py | [
{
"identifier": "Fanqie",
"path": "tools/Fanqie.py",
"snippet": "class FanqieNovel:\n def __init__(self, url, mode):\n def __str__(self):\n def parse_url(self, url: str) -> str:"
},
{
"identifier": "DownloadNovel",
"path": "tools/DownloadNovel.py",
"snippet": "class DownloadNovel(threading.Thread):\n \"\"\"\n 下载小说,应传入番茄对象\n \"\"\"\n\n def __init__(self, fanqie: Fanqie):\n # 番茄小说对象\n self.fanqie: Fanqie = fanqie\n # 停止子进程\n self._stop_flag = False\n self._stop_event = threading.Event()\n\n # 自定义WebDav路径\n self.is_webdav = os.environ.get('IS_WEBDAV')\n if self.is_webdav:\n self.webdav_username = os.environ.get('WEBDAV_USERNAME')\n self.webdav_pwd = os.environ.get('WEBDAV_PWD')\n self.webdav_url = os.environ.get('WEBDAV_URL')\n self.webdav = Client(base_url=self.webdav_url,\n auth=(self.webdav_username, self.webdav_pwd))\n tools.logger.info(f'已成功加载webdav服务器({self.webdav_url})')\n\n # 自定义保存路径\n self.custom_path = os.environ.get('CUSTOM_PATH')\n if not self.custom_path:\n self.custom_path = './books'\n os.makedirs(self.custom_path, exist_ok=True)\n tools.logger.warning(f'您未设置自定义保存路径,将使用默认路径: {self.custom_path}')\n\n super().__init__()\n\n def run(self) -> None:\n # 数据库中获取小说对象\n history_entry = History.objects.get(obid=self.fanqie.obid)\n tools.logger.info(f'开始下载小说: \\n{self.fanqie.__str__()}')\n\n # 判断下载模式\n if self.fanqie.mode == 'txt':\n tools.logger.info(f'正在以txt模式下载小说')\n\n content = f\"\"\"{self.fanqie.title}\n {self.fanqie.intro}\n \"\"\"\n # 获取所有章节链接\n start_index = 0\n\n file_name = self.fanqie.title + \".txt\"\n file_path = os.path.join(self.custom_path, file_name)\n\n # 获取章节数\n chapters = self.fanqie.soup.find_all(\"div\", class_=\"chapter-item\")\n chapter_num = len(chapters)\n chapter_num_now = 0\n\n try:\n # 遍历每个章节链接\n for chapter in chapters[start_index:]:\n if self._stop_event.is_set():\n break\n time.sleep(0.25)\n if self._stop_event.is_set():\n break\n # 获取章节标题\n chapter_title = chapter.find(\"a\").get_text()\n\n # 获取章节网址\n chapter_url = urljoin(self.fanqie.url, chapter.find(\"a\")[\"href\"])\n\n # 获取章节 id\n chapter_id = re.search(r\"/(\\d+)\", chapter_url).group(1)\n\n # 构造 api 网址\n api_url = (f\"https://novel.snssdk.com/api/novel/book/reader/full/v1/?device_platform=android&\"\n f\"parent_enterfrom=novel_channel_search.tab.&aid=2329&platform_id=1&group_id=\"\n f\"{chapter_id}&item_id={chapter_id}\")\n\n # 尝试获取章节内容\n chapter_content = None\n retry_count = 1\n while retry_count < 4: # 设置最大重试次数\n if self._stop_event.is_set():\n break\n\n def get_api():\n # 获取 api 响应\n api_response_ = requests.get(api_url, headers=self.fanqie.headers)\n\n # 解析 api 响应为 json 数据\n api_data_ = api_response_.json()\n return api_data_\n\n api_data = None\n retry_get_api = 1\n while retry_get_api < 4:\n try:\n api_data = get_api()\n except Exception as e:\n tools.logger.error(f'错误!{e}')\n else:\n break\n retry_get_api += 1\n\n if \"data\" in api_data and \"content\" in api_data[\"data\"]:\n chapter_content = api_data[\"data\"][\"content\"]\n break # 如果成功获取章节内容,跳出重试循环\n else:\n if retry_count == 1:\n tools.logger.warning(f'{chapter_title} 获取失败,正在尝试重试...')\n tools.logger.warning(f'第 ({retry_count}/3) 次重试获取章节内容')\n retry_count += 1 # 否则重试\n\n if retry_count == 4:\n tools.logger.error(f'无法获取章节内容: {chapter_title},跳过。')\n continue # 重试次数过多后,跳过当前章节\n\n # 提取文章标签中的文本\n chapter_text = re.search(r\"<article>([\\s\\S]*?)</article>\", chapter_content).group(1)\n\n # 将 <p> 标签替换为换行符\n chapter_text = re.sub(r\"<p>\", \"\\n\", chapter_text)\n\n # 去除其他 html 标签\n chapter_text = re.sub(r\"</?\\w+>\", \"\", chapter_text)\n\n chapter_text = tools.fix_publisher(chapter_text)\n\n # 在小说内容字符串中添加章节标题和内容\n content += f\"\\n\\n\\n{chapter_title}\\n{chapter_text}\"\n\n chapter_num_now += 1\n history_entry.percent = round(\n (chapter_num_now / chapter_num) * 100, 2)\n 
history_entry.save()\n\n # 打印进度信息\n tools.logger.info(f'已获取 {chapter_title}, 进度:{history_entry.percent}%')\n # 根据编码转换小说内容字符串为二进制数据\n data = content.encode('utf-8', errors='ignore')\n\n # 保存文件\n with open(file_path, \"wb\") as f:\n f.write(data)\n\n file_path = os.path.join(self.custom_path, file_name)\n file_path = Path(file_path)\n if self.is_webdav:\n self.webdav.upload_file(from_path=file_path,\n to_path=os.path.join('/public', file_name),\n overwrite=True)\n tools.logger.info(f'《{self.fanqie.title}》已成功上传webdav服务器')\n\n # 打印完成信息\n tools.logger.info(f'已保存{self.fanqie.title}.txt至本地')\n\n except BaseException as e:\n # 捕获所有异常,及时保存文件\n tools.logger.error(f'发生异常: \\n{e}')\n tools.logger.info('正在尝试保存文件')\n # 根据编码转换小说内容字符串为二进制数据\n data = content.encode('utf-8', errors='ignore')\n\n # 保存文件\n file_path = os.path.join(self.custom_path, file_name)\n with open(file_path, \"wb\") as f:\n f.write(data)\n\n tools.logger.info('文件已保存!')\n return\n\n elif self.fanqie.mode == 'epub':\n tools.logger.info(f'正在以epub模式下载小说')\n\n # 创建epub电子书\n book = epub.EpubBook()\n\n # 下载封面\n response = requests.get(self.fanqie.img_url)\n # 获取图像的内容\n img_data = response.content\n\n # 保存图像到本地文件\n with open(\"cover.jpg\", \"wb\") as f:\n f.write(img_data)\n\n # 创建一个封面图片\n book.set_cover(\"image.jpg\", open('cover.jpg', 'rb').read())\n\n # 删除封面\n os.remove('cover.jpg')\n\n # 设置书的元数据\n book.set_title(self.fanqie.title)\n book.set_language('zh-CN')\n book.add_author(self.fanqie.author_name)\n book.add_metadata('DC', 'description', self.fanqie.intro)\n\n # 获取卷标\n page_directory_content = self.fanqie.soup.find('div', class_='page-directory-content')\n nested_divs = page_directory_content.find_all('div', recursive=False)\n\n # intro chapter\n intro_e = epub.EpubHtml(title='Introduction', file_name='intro.xhtml', lang='hr')\n intro_e.content = (f'<html><head></head><body>'\n f'<img src=\"image.jpg\" alt=\"Cover Image\"/>'\n f'<h1>{self.fanqie.title}</h1>'\n f'<p>{self.fanqie.intro}</p>'\n f'</body></html>')\n book.add_item(intro_e)\n\n # 创建索引\n book.toc = (epub.Link('intro.xhtml', '简介', 'intro'),)\n book.spine = ['nav', intro_e]\n\n # 获取章节数\n chapters = self.fanqie.soup.find_all(\"div\", class_=\"chapter-item\")\n chapter_num = len(chapters)\n chapter_num_now = 0\n\n try:\n volume_id = 0\n\n # 遍历每个卷\n for div in nested_divs:\n if self._stop_event.is_set():\n break\n first_chapter = None\n volume_id += 1\n volume_div = div.find('div', class_='volume')\n # 提取 \"卷名\" 文本\n volume_title = volume_div.text\n tools.logger.info(f'正在获取{volume_title}')\n chapters = div.find_all(\"div\", class_=\"chapter-item\")\n start_index = None\n for i, chapter in enumerate(chapters):\n if self._stop_event.is_set():\n break\n chapter_url_tmp = urljoin(self.fanqie.url, chapter.find(\"a\")[\"href\"])\n chapter_id_tmp = re.search(r\"/(\\d+)\", chapter_url_tmp).group(1)\n if chapter_id_tmp == '0': # epub模式不支持起始章节\n start_index = i\n\n # 定义目录索引\n toc_index = ()\n\n chapter_id_name = 0\n\n # 遍历每个章节链接\n for chapter in chapters[start_index:]:\n chapter_id_name += 1\n if self._stop_event.is_set():\n break\n time.sleep(0.25)\n if self._stop_event.is_set():\n break\n # 获取章节标题\n chapter_title = chapter.find(\"a\").get_text()\n\n # 获取章节网址\n chapter_url = urljoin(self.fanqie.url, chapter.find(\"a\")[\"href\"])\n\n # 获取章节 id\n chapter_id = re.search(r\"/(\\d+)\", chapter_url).group(1)\n\n # 构造 api 网址\n api_url = (f\"https://novel.snssdk.com/api/novel/book/reader/full/v1/?device_platform=android&\"\n 
f\"parent_enterfrom=novel_channel_search.tab.&aid=2329&platform_id=1&group_id=\"\n f\"{chapter_id}&item_id={chapter_id}\")\n\n # 尝试获取章节内容\n chapter_content = None\n retry_count = 1\n while retry_count < 4: # 设置最大重试次数\n if self._stop_event.is_set():\n break\n\n def get_api():\n # 获取 api 响应\n api_response_ = requests.get(api_url, headers=self.fanqie.headers)\n\n # 解析 api 响应为 json 数据\n api_data_ = api_response_.json()\n return api_data_\n\n api_data = None\n retry_get_api = 1\n while retry_get_api < 4:\n try:\n api_data = get_api()\n except Exception as e:\n tools.logger.error(f'发生异常: \\n{e}')\n else:\n break\n retry_get_api += 1\n\n if \"data\" in api_data and \"content\" in api_data[\"data\"]:\n chapter_content = api_data[\"data\"][\"content\"]\n break # 如果成功获取章节内容,跳出重试循环\n else:\n if retry_count == 1:\n tools.logger.warning(f'{chapter_title} 获取失败,正在尝试重试...')\n tools.logger.warning(f'第 ({retry_count}/3) 次重试获取章节内容')\n retry_count += 1 # 否则重试\n\n if retry_count == 4:\n tools.logger.error(f'无法获取章节内容: {chapter_title},跳过。')\n continue # 重试次数过多后,跳过当前章节\n\n # 提取文章标签中的文本\n chapter_text = re.search(r\"<article>([\\s\\S]*?)</article>\", chapter_content).group(1)\n\n # 在小说内容字符串中添加章节标题和内容\n text = epub.EpubHtml(title=chapter_title,\n file_name=f'chapter_{volume_id}_{chapter_id_name}.xhtml')\n text.content = (f'<h2>{chapter_title}</h2>'\n f'{chapter_text}')\n\n toc_index = toc_index + (text,)\n book.spine.append(text)\n\n # 寻找第一章\n if chapter_id_name == 1:\n first_chapter = f'chapter_{volume_id}_{chapter_id_name}.xhtml'\n\n # 加入epub\n book.add_item(text)\n\n chapter_num_now += 1\n history_entry.percent = round(\n (chapter_num_now / chapter_num) * 100, 2)\n history_entry.save()\n\n # 打印进度信息\n tools.logger.info(f'已获取 {chapter_title}, 进度:{history_entry.percent}%')\n # 加入书籍索引\n book.toc = book.toc + ((epub.Section(volume_title, href=first_chapter),\n toc_index,),)\n # 捕获异常\n except BaseException as e:\n # 捕获所有异常\n tools.logger.error(f'发生异常: \\n{e}')\n return\n\n # 添加 navigation 文件\n book.add_item(epub.EpubNcx())\n book.add_item(epub.EpubNav())\n\n # 拼接文件名和文件路径\n file_name = self.fanqie.title + \".epub\"\n file_path = os.path.join(self.custom_path, file_name)\n\n # 书写电子书\n epub.write_epub(file_path, book, {})\n\n # webdav上传\n file_path = Path(file_path)\n if self.is_webdav:\n self.webdav.upload_file(from_path=file_path,\n to_path=os.path.join('/public', file_name),\n overwrite=True)\n tools.logger.info(f'《{self.fanqie.title}》已成功上传webdav服务器')\n\n tools.logger.info(f'已保存{self.fanqie.title}.epub至本地')\n\n # 停止子进程函数\n def stop(self):\n self._stop_event.set()"
},
{
"identifier": "History",
"path": "Api/models.py",
"snippet": "class History(models.Model):\n file_name = models.CharField(max_length=255)\n percent = models.FloatField(default=0)\n book_id = models.CharField(max_length=255)\n obid = models.CharField(max_length=255)\n objects = models.Manager()"
}
] | import os
import tools
import json
from django.http import JsonResponse
from tools import Fanqie, DownloadNovel
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .models import History | 4,160 |
# Collection of novels currently being downloaded
download_object = []
@csrf_exempt # allow cross-origin requests (optional)
@require_POST # only accept POST requests (optional)
@tools.logger.catch # capture detailed error information
def download(request): # download endpoint
global download_object
if request.method == 'POST':
try:
            # Get the url data
            tools.logger.info('正在获取url数据……') # log progress
data = json.loads(request.body.decode('utf-8'))
urls = data.get('urls', [])
            # Initial de-duplication
urls = list(set(urls))
tools.logger.info(f'已获取urls为:{urls}')
            # Get the download format
format_ = data.get('format', 'txt')
tools.logger.info(f'下载方式为{format_}')
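            # Illustrative request body (assumed shape, added for clarity):
            #   {"urls": ["<fanqie novel page url>"], "format": "epub"}
            # `format` may be "txt" or "epub", matching DownloadNovel's two modes.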
            # Get book information
books = []
|
# Collection of novels currently being downloaded
download_object = []
@csrf_exempt # allow cross-origin requests (optional)
@require_POST # only accept POST requests (optional)
@tools.logger.catch # capture detailed error information
def download(request): # download endpoint
global download_object
if request.method == 'POST':
try:
            # Get the url data
            tools.logger.info('正在获取url数据……') # log progress
data = json.loads(request.body.decode('utf-8'))
urls = data.get('urls', [])
            # Initial de-duplication
urls = list(set(urls))
tools.logger.info(f'已获取urls为:{urls}')
            # Get the download format
format_ = data.get('format', 'txt')
tools.logger.info(f'下载方式为{format_}')
            # Get book information
books = [] | [books.append(Fanqie.FanqieNovel(url, format_)) for url in urls] | 0 | 2023-11-05 09:35:20+00:00 | 8k |
StoneMoe/ASub | app/ui/views/project_view.py | [
{
"identifier": "Project",
"path": "app/core/models/project.py",
"snippet": "class Project:\r\n path: str # 工程目录(相对位置)\r\n name: str # 工程名称\r\n\r\n def __init__(self, name: str, existed_err=False):\r\n self.name = name\r\n self.path = os.path.join(Core.PROJ_DIR, name)\r\n try:\r\n os.makedirs(self.path)\r\n info(f'已创建目录 {self.path}')\r\n except OSError as e: # directory existed\r\n if existed_err:\r\n raise e\r\n\r\n def _prepare(self):\r\n info(f'正在预处理 \"{self.name}\" 的音频')\r\n tmp_path = os.path.join(self.path, 'source.wav')\r\n tmp_file = test_files(tmp_path)\r\n src_file = test_files(\r\n os.path.join(self.path, 'source.mp4'),\r\n os.path.join(self.path, f'{self.name}.mp4'),\r\n os.path.join(self.path, f'{self.name}.mp3')\r\n )\r\n if tmp_file:\r\n info(f'找到了临时文件 \"{tmp_file}\",跳过预处理')\r\n elif src_file:\r\n info(f'找到了 \"{src_file}\",开始预处理')\r\n if check_ffmpeg() != FFMpegStatus.READY:\r\n raise EnvironmentError('FFMpeg尚未安装')\r\n proc: Popen[bytes] = ffmpeg.input(src_file) \\\r\n .output(tmp_path, format='wav', acodec='pcm_s16le', ac=1, ar=16000) \\\r\n .overwrite_output() \\\r\n .run_async(pipe_stdout=True, pipe_stderr=True)\r\n out, err = proc.communicate()\r\n return_code = proc.wait()\r\n if return_code != 0:\r\n raise ChildProcessError('无法提取音频')\r\n info('预处理成功')\r\n else:\r\n raise FileNotFoundError(f'请将同名 mp4 文件放置在 {self.path}')\r\n\r\n def delete(self):\r\n \"\"\"Delete project folder\"\"\"\r\n shutil.rmtree(self.path)\r\n\r\n def transcribe(self, opt: TranscribeOpt):\r\n \"\"\"\r\n transcribe wav audio to SRT\r\n\r\n :return: transcribe result file path\r\n \"\"\"\r\n self._prepare()\r\n\r\n target_file = opt.make_srt_filepath(name=self.name, path=self.path)\r\n if os.path.isfile(target_file):\r\n info(f'文件 \"{target_file}\" 已存在,跳过听写')\r\n return target_file\r\n\r\n info(f'使用 {opt}')\r\n match opt.backend:\r\n # case Engine.CPP_CPU:\r\n # ext = ''\r\n # if opt.compress_ratio_threshold:\r\n # ext += f' -et {opt.compress_ratio_threshold} '\r\n # if opt.prompt_name:\r\n # ext += f' --prompt \"{DB.PROMPTS[opt.prompt_name]}\" '\r\n # if opt.speedup:\r\n # ext += f' -su '\r\n # if opt.ss and opt.t:\r\n # ss = opt.ss * 1000\r\n # t = opt.t * 1000\r\n # if opt.speedup:\r\n # ss /= 2\r\n # t /= 2\r\n # ext += f' -ot {ss} -d {t} '\r\n # cmd = f\".\\\\whisper\\\\main.exe -m data/whisper_model/ggml-large-v2.bin \" \\\r\n # f\"-pp -osrt -l {opt.lang} -t 8 {ext} -f {self.path}/source.wav -of {target_file.rstrip('.srt')}\"\r\n # print(f'运行: {cmd}')\r\n # proc = subprocess.Popen(cmd, shell=True, cwd=os.getcwd(), stdout=subprocess.PIPE)\r\n # for line in proc.stdout:\r\n # print(line.decode(Core.CODEC).rstrip())\r\n case 'py-gpu' | 'py-cpu':\r\n info('正在加载模型')\r\n import whisper\r\n import torch\r\n model = whisper.load_model(opt.model, download_root='whisper_model', device='cpu')\r\n if opt.quantize:\r\n info('正在量化模型')\r\n model = torch.quantization.quantize_dynamic(\r\n model, {torch.nn.Linear}, dtype=torch.qint8\r\n )\r\n if opt.backend == 'py-gpu':\r\n info('正在加载至显卡')\r\n model.to('cuda')\r\n result = model.transcribe(\r\n audio=f'{self.path}/source.wav',\r\n language=opt.lang,\r\n compression_ratio_threshold=opt.compress_ratio_threshold,\r\n initial_prompt=Consts.PROMPTS[opt.prompt_name],\r\n verbose=True,\r\n )\r\n\r\n del model\r\n torch.cuda.empty_cache()\r\n\r\n segments = result['segments']\r\n srt = SRTFile(source=segments)\r\n srt.dump(target_file)\r\n case _:\r\n raise NotImplementedError(f'{opt.backend} 引擎尚未支持')\r\n\r\n info('听写完成')\r\n\r\n def translate(self, opt: TranscribeOpt, vocab=None):\r\n srt = 
SRTFile(source=opt.make_srt_filepath(self.name, self.path))\r\n srt.translate(vocab=vocab)\r\n\r\n @classmethod\r\n def list(cls) -> List[str]:\r\n \"\"\"list all projects\"\"\"\r\n names = os.listdir(Core.PROJ_DIR)\r\n directories = [name for name in names if os.path.isdir(os.path.join(Core.PROJ_DIR, name))]\r\n directories = sort_titles(directories)\r\n return directories\r\n\r\n @classmethod\r\n def bulk_create(cls, targets: List[tuple]):\r\n info(f'正在创建 {len(targets)} 个工程')\r\n for proj_name, filepath in targets:\r\n try:\r\n proj = Project(proj_name, existed_err=True)\r\n except OSError:\r\n info(f'\"{proj_name}\" 已存在,不再创建')\r\n continue\r\n\r\n if filepath:\r\n dst_filepath = os.path.join(proj.path, os.path.basename(filepath))\r\n info(f'正在将 {filepath} 复制到 {dst_filepath}')\r\n shutil.copy(filepath, dst_filepath)\r\n info('复制完毕')\r"
},
{
"identifier": "TranscribeOpt",
"path": "app/core/models/project.py",
"snippet": "class TranscribeOpt:\r\n \"\"\"\r\n :param backend: whisper implementation\r\n :param model: whisper model name\r\n :param quantize: whisper model quantization switch\r\n :param ss: transcribe start second\r\n :param t: transcribe time duration(second)\r\n :param compress_ratio_threshold: 2.4 ~ 3 is recommended, segments higher than this will be re-inferenced\r\n :param speedup: double speed, decrease quality\r\n :param prompt_name: name\r\n \"\"\"\r\n backend: str\r\n model: str\r\n quantize: bool\r\n lang: Optional[str]\r\n ss: int # TODO: implement in whisper.py mode\r\n t: int # TODO: implement in whisper.py mode\r\n compress_ratio_threshold: float\r\n speedup: bool # TODO: implement in whisper.py mode\r\n prompt_name: str\r\n\r\n def make_srt_filepath(self, name: str, path: str) -> str:\r\n return f'{path}/' \\\r\n f'{name}' \\\r\n f'[{self.backend}]' \\\r\n f'[{self.model}]' \\\r\n f'[q{int(self.quantize)}]' \\\r\n f'[L{self.lang or \"auto\"}]' \\\r\n f'[t{\"FULL\" if not (self.ss and self.t) else f\"{self.ss}-{self.ss + self.t}\"}]' \\\r\n f'[e{self.compress_ratio_threshold}]' \\\r\n f'[s{int(self.speedup)}]' \\\r\n f'[p{self.prompt_name or \"-\"}]' \\\r\n f'.srt'\r"
},
{
"identifier": "info",
"path": "app/core/utils/generic.py",
"snippet": "def info(text):\r\n print(f\"ℹ️{text}\")\r"
},
{
"identifier": "AutoLabel",
"path": "app/ui/components/label.py",
"snippet": "class AutoLabel(QLabel):\r\n def __init__(self, text, parent=None, elide_mode=None):\r\n super().__init__(text, parent)\r\n self._raw_text = text\r\n self._elide_mode = elide_mode if elide_mode is not None else Qt.ElideMiddle\r\n self._eliding = False\r\n\r\n def _get_elided_text(self):\r\n return self.fontMetrics().elidedText(self._raw_text, self._elide_mode, self.width())\r\n\r\n def resizeEvent(self, event: QtGui.QResizeEvent):\r\n super().resizeEvent(event)\r\n if self._eliding:\r\n return\r\n\r\n self._eliding = True\r\n super().setText(self._get_elided_text())\r\n self._eliding = False\r\n\r\n def setText(self, text):\r\n self._raw_text = text\r\n super().setText(self._get_elided_text())\r"
},
{
"identifier": "cfg",
"path": "app/ui/config.py",
"snippet": "class Engine(Enum):\r\nclass TranscribeModel(Enum):\r\nclass UILang(Enum):\r\nclass TranscribeLang(Enum):\r\nclass Config(QConfig):\r\n PY_CPU = \"py-cpu\"\r\n PY_GPU = \"py-gpu\"\r\n CPP_CPU = \"cpp-cpu\"\r\n LARGE_V2 = \"large-v2\"\r\n MEDIUM = \"medium\"\r\n SMALL = \"small\"\r\n BASE = \"base\"\r\n TINY = \"tiny\"\r\n CHINESE_SIMPLIFIED = \"chs\"\r\n CHINESE_TRADITIONAL = \"cht\"\r\n ENGLISH = \"en\"\r\n AUTO = \"auto\"\r\n AUTO = None\r\n def options(cls):\r"
},
{
"identifier": "CONTAINER_MARGINS",
"path": "app/ui/const.py",
"snippet": "CONTAINER_MARGINS = (32, 64, 32, 32)\r"
},
{
"identifier": "run_in_thread",
"path": "app/ui/utils.py",
"snippet": "def run_in_thread(func):\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n if args and kwargs:\r\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\r\n elif args:\r\n t = threading.Thread(target=func, args=args)\r\n else:\r\n t = threading.Thread(target=func)\r\n t.daemon = True\r\n t.start()\r\n return t\r\n\r\n return wrapper\r"
},
{
"identifier": "clear_layout",
"path": "app/ui/utils.py",
"snippet": "def clear_layout(layout):\r\n while layout.count():\r\n child = layout.takeAt(0)\r\n if child.widget():\r\n child.widget().deleteLater()\r\n elif child.layout():\r\n clear_layout(child.layout())\r"
},
{
"identifier": "open_folder",
"path": "app/ui/utils.py",
"snippet": "def open_folder(folder_path):\r\n \"\"\"Open specific folder in file explorer application\"\"\"\r\n if os.name == 'nt': # Windows\r\n os.startfile(folder_path)\r\n elif os.name == 'posix': # Linux, macOS, etc.\r\n subprocess.Popen(['xdg-open', folder_path])\r\n else:\r\n raise OSError(f'Unsupported platform: {os.name}')\r"
},
{
"identifier": "SubtitleWindow",
"path": "app/ui/windows/subtitle_window.py",
"snippet": "class SubtitleWindow(QDialog, FramelessWindow):\n def __init__(self, filepath: str, parent=None):\n super().__init__(parent)\n self.srt_file = SRTFile(filepath)\n self.hBoxLayout = QVBoxLayout(self)\n self.tableView = TableWidget(self)\n self.saveButton = QPushButton(\"Save\", self)\n self.saveButton.clicked.connect(self._save_subtitle_file)\n\n self.hBoxLayout.setContentsMargins(*CONTAINER_MARGINS)\n self.hBoxLayout.addWidget(self.tableView)\n self.hBoxLayout.addWidget(self.saveButton)\n\n self.init_window()\n self._load_subtitle_file()\n\n def _load_subtitle_file(self):\n self.tableView.setWordWrap(False)\n self.tableView.setRowCount(len(self.srt_file.entries))\n self.tableView.setColumnCount(3)\n for i, entry in enumerate(self.srt_file.entries):\n self.tableView.setItem(i, 0, QTableWidgetItem(entry.index))\n self.tableView.setItem(i, 1, QTableWidgetItem(entry.time))\n self.tableView.setItem(i, 2, QTableWidgetItem(entry.text))\n\n self.tableView.verticalHeader().hide()\n self.tableView.setHorizontalHeaderLabels(['Index', 'Time', 'Text'])\n self.tableView.resizeColumnsToContents()\n\n def _save_subtitle_file(self):\n for i in range(self.tableView.rowCount()):\n self.srt_file.entries[i].index = self.tableView.item(i, 0).text()\n self.srt_file.entries[i].time = self.tableView.item(i, 1).text()\n self.srt_file.entries[i].text = self.tableView.item(i, 2).text()\n\n self.srt_file.dump()\n\n def init_window(self):\n self.setWindowTitle(f'编辑 {self.srt_file.filepath}')\n self.resize(625, 700)\n self._set_qss()\n\n def _set_qss(self):\n color = 'dark' if isDarkTheme() else 'light'\n with open(res_dir(f'app/ui/resource/qss/{color}/style.qss'), encoding='utf-8') as f:\n self.setStyleSheet(f.read())"
}
] | import os
from typing import Optional
from PyQt5.QtCore import pyqtSignal, QPoint, Qt
from PyQt5.QtWidgets import QFrame, QVBoxLayout, QHBoxLayout, QAction
from qfluentwidgets import PushButton, FluentIcon, RoundMenu, ToolButton, MessageBox, StateToolTip
from app.core.models.project import Project, TranscribeOpt
from app.core.utils.generic import info
from app.ui.components.label import AutoLabel
from app.ui.config import cfg
from app.ui.const import CONTAINER_MARGINS
from app.ui.utils import run_in_thread, clear_layout, open_folder
from app.ui.windows.subtitle_window import SubtitleWindow
| 4,301 |
def set_project(self, project: Project):
self.project = project
self.label_title.setText(self.project.name)
self.label_title.setToolTip(self.project.name)
self._reload_subtitle_list()
def _init_layout(self):
self.layout_title.addWidget(self.label_title)
self.layout_title.addWidget(self.btn_manage)
self.layout.addLayout(self.layout_title)
self.layout.addLayout(self.layout_subtitles)
self.layout.addStretch(1)
self.layout.addWidget(self.btn_transcribe)
self.layout.setContentsMargins(*CONTAINER_MARGINS)
def _init_signal(self):
self.sig_subtitle_list_loaded.connect(self._on_subtitle_list_loaded)
self.sig_transcribe_running.connect(self._on_transcribe_running_changed)
def _on_transcribe_running_changed(self, running: bool):
if self.state_tooltip is None:
self.state_tooltip = StateToolTip('正在听写中', '请耐心等待', self)
self.state_tooltip.closeButton.hide()
if running:
self.btn_transcribe.setDisabled(True)
self.state_tooltip.move(10, 10)
self.state_tooltip.show()
else:
self.btn_transcribe.setDisabled(False)
self.state_tooltip.setState(True)
self.state_tooltip.setTitle('听写完成!')
self.state_tooltip.setContent('')
self.state_tooltip = None
def _on_subtitle_list_loaded(self, filenames: list):
clear_layout(self.layout_subtitles)
for filename in filenames:
layout = QHBoxLayout(self)
label = AutoLabel(filename, self, Qt.ElideLeft)
label.setToolTip(filename)
btn_translate = ToolButton(FluentIcon.EDIT, self)
btn_translate.setToolTip('编辑')
btn_translate.clicked.connect(self._on_subtitle_edit_clicked(filename))
btn_delete = ToolButton(FluentIcon.DELETE, self)
btn_delete.setToolTip('删除')
btn_delete.clicked.connect(self._on_subtitle_delete_clicked(filename))
layout.addWidget(label)
layout.addWidget(btn_translate)
layout.addWidget(btn_delete)
self.layout_subtitles.addLayout(layout)
def _reload_subtitle_list(self):
self.sig_subtitle_list_loaded.emit(
[
filename
for filename in os.listdir(self.project.path)
if filename.endswith('.srt') or filename.endswith('.ass')
]
)
def _on_subtitle_edit_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
edit_win = SubtitleWindow(target_file)
edit_win.exec_()
return f
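    # Note (added commentary): these click-handler factories return a fresh closure `f`
    # capturing `filename`, so every row's buttons act on their own subtitle file when
    # they are wired up in _on_subtitle_list_loaded.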
def _on_subtitle_delete_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
if MessageBox('删除确认', f'真的要删除 {target_file} 吗?', self.window()).exec():
os.remove(target_file)
self._reload_subtitle_list()
return f
def _on_btn_manage_clicked(self, pos):
menu = RoundMenu(parent=self)
        act_open_folder = QAction(FluentIcon.FOLDER.icon(), 'Open folder')
        act_archive = QAction(FluentIcon.SAVE.icon(), 'Archive')
        act_clear_srt = QAction(FluentIcon.DELETE.icon(), 'Delete all SRT files')
        act_clear_ass = QAction(FluentIcon.DELETE.icon(), 'Delete all ASS files')
        act_delete_proj = QAction(FluentIcon.DELETE.icon(), 'Delete this project')
        act_open_folder.triggered.connect(lambda: open_folder(self.project.path))
        act_archive.triggered.connect(lambda: MessageBox('Archive?', 'This feature is not implemented yet', self.window()).exec())
        act_clear_srt.triggered.connect(lambda: print('This feature is not implemented yet'))
        act_clear_ass.triggered.connect(lambda: print('This feature is not implemented yet'))
act_delete_proj.triggered.connect(self._on_act_del_proj)
menu.addActions([
act_open_folder,
act_archive,
])
menu.addSeparator()
menu.addActions([
act_clear_srt,
act_clear_ass,
])
menu.addSeparator()
menu.addAction(act_delete_proj)
# show menu
menu.exec(pos, ani=True)
def _on_act_del_proj(self):
        if MessageBox('Confirm deletion', 'Really delete it?', self.window()).exec():
self.project.delete()
self.window().reload_projects()
|
class ProjectView(QFrame):
sig_subtitle_list_loaded = pyqtSignal(list)
sig_transcribe_running = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setObjectName('proj-view')
self.project: Optional[Project] = None
self.state_tooltip = None
self.layout = QVBoxLayout(self)
self.layout_title = QHBoxLayout(self)
self.layout_subtitles = QVBoxLayout(self)
self.label_title = AutoLabel('<Loading>', self, Qt.ElideMiddle)
self.label_title.setObjectName('ViewTitle')
self.btn_manage = ToolButton(FluentIcon.MORE, self)
self.btn_manage.clicked.connect(
lambda: self._on_btn_manage_clicked(
self.btn_manage.mapToGlobal(QPoint()) + QPoint(self.btn_manage.width() + 5, 10)
)
)
        self.btn_transcribe = PushButton('Start transcription', self, FluentIcon.SEND_FILL)
self.btn_transcribe.clicked.connect(self._run_transcribe)
self._init_signal()
self._init_layout()
def set_project(self, project: Project):
self.project = project
self.label_title.setText(self.project.name)
self.label_title.setToolTip(self.project.name)
self._reload_subtitle_list()
def _init_layout(self):
self.layout_title.addWidget(self.label_title)
self.layout_title.addWidget(self.btn_manage)
self.layout.addLayout(self.layout_title)
self.layout.addLayout(self.layout_subtitles)
self.layout.addStretch(1)
self.layout.addWidget(self.btn_transcribe)
self.layout.setContentsMargins(*CONTAINER_MARGINS)
def _init_signal(self):
self.sig_subtitle_list_loaded.connect(self._on_subtitle_list_loaded)
self.sig_transcribe_running.connect(self._on_transcribe_running_changed)
def _on_transcribe_running_changed(self, running: bool):
if self.state_tooltip is None:
            self.state_tooltip = StateToolTip('Transcription in progress', 'Please wait patiently', self)
self.state_tooltip.closeButton.hide()
if running:
self.btn_transcribe.setDisabled(True)
self.state_tooltip.move(10, 10)
self.state_tooltip.show()
else:
self.btn_transcribe.setDisabled(False)
self.state_tooltip.setState(True)
            self.state_tooltip.setTitle('Transcription complete!')
self.state_tooltip.setContent('')
self.state_tooltip = None
def _on_subtitle_list_loaded(self, filenames: list):
clear_layout(self.layout_subtitles)
for filename in filenames:
layout = QHBoxLayout(self)
label = AutoLabel(filename, self, Qt.ElideLeft)
label.setToolTip(filename)
btn_translate = ToolButton(FluentIcon.EDIT, self)
            btn_translate.setToolTip('Edit')
btn_translate.clicked.connect(self._on_subtitle_edit_clicked(filename))
btn_delete = ToolButton(FluentIcon.DELETE, self)
            btn_delete.setToolTip('Delete')
btn_delete.clicked.connect(self._on_subtitle_delete_clicked(filename))
layout.addWidget(label)
layout.addWidget(btn_translate)
layout.addWidget(btn_delete)
self.layout_subtitles.addLayout(layout)
def _reload_subtitle_list(self):
self.sig_subtitle_list_loaded.emit(
[
filename
for filename in os.listdir(self.project.path)
if filename.endswith('.srt') or filename.endswith('.ass')
]
)
def _on_subtitle_edit_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
edit_win = SubtitleWindow(target_file)
edit_win.exec_()
return f
def _on_subtitle_delete_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
            if MessageBox('Confirm deletion', f'Really delete {target_file}?', self.window()).exec():
os.remove(target_file)
self._reload_subtitle_list()
return f
def _on_btn_manage_clicked(self, pos):
menu = RoundMenu(parent=self)
        act_open_folder = QAction(FluentIcon.FOLDER.icon(), 'Open folder')
        act_archive = QAction(FluentIcon.SAVE.icon(), 'Archive')
        act_clear_srt = QAction(FluentIcon.DELETE.icon(), 'Delete all SRT files')
        act_clear_ass = QAction(FluentIcon.DELETE.icon(), 'Delete all ASS files')
        act_delete_proj = QAction(FluentIcon.DELETE.icon(), 'Delete this project')
        act_open_folder.triggered.connect(lambda: open_folder(self.project.path))
        act_archive.triggered.connect(lambda: MessageBox('Archive?', 'This feature is not implemented yet', self.window()).exec())
        act_clear_srt.triggered.connect(lambda: print('This feature is not implemented yet'))
        act_clear_ass.triggered.connect(lambda: print('This feature is not implemented yet'))
act_delete_proj.triggered.connect(self._on_act_del_proj)
menu.addActions([
act_open_folder,
act_archive,
])
menu.addSeparator()
menu.addActions([
act_clear_srt,
act_clear_ass,
])
menu.addSeparator()
menu.addAction(act_delete_proj)
# show menu
menu.exec(pos, ani=True)
def _on_act_del_proj(self):
        if MessageBox('Confirm deletion', 'Really delete it?', self.window()).exec():
self.project.delete()
self.window().reload_projects()
| @run_in_thread
| 6 | 2023-11-07 16:45:43+00:00 | 8k |
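The next_line of the record above is the decorator @run_in_thread. Below is a minimal hedged sketch of the ProjectView._run_transcribe method it plausibly decorates, inferred only from the sig_transcribe_running signal and the btn_transcribe.clicked.connect(self._run_transcribe) wiring shown in the record; the body, in particular the placeholder where the transcription call would go, is an assumption and not the repository's implementation.

# Hypothetical sketch (not the original source): only the signal wiring is taken
# from the record above; the transcription work itself is a placeholder assumption.
@run_in_thread
def _run_transcribe(self):
    self.sig_transcribe_running.emit(True)
    # ... the actual transcription call (not shown in this record) would run here ...
    self.sig_transcribe_running.emit(False)
    self._reload_subtitle_list()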
openshift/lightspeed-service | ols/app/endpoints/ols.py | [
{
"identifier": "constants",
"path": "ols/app/constants.py",
"snippet": "VALID = \"VALID\"\nINVALID = \"INVALID\"\nYAML = \"YAML\"\nNOYAML = \"NOYAML\"\nSOME_FAILURE = \"some failure\""
},
{
"identifier": "LLMRequest",
"path": "ols/app/models/models.py",
"snippet": "class LLMRequest(BaseModel):\n \"\"\"Model representing a request for the LLM (Language Model).\n\n Attributes:\n query: The query string.\n conversation_id: The optional conversation ID.\n response: The optional response.\n\n Example:\n llm_request = LLMRequest(query=\"Tell me about Kubernetes\")\n \"\"\"\n\n query: str\n conversation_id: Union[str, None] = None\n response: Union[str, None] = None"
},
{
"identifier": "Utils",
"path": "ols/app/utils.py",
"snippet": "class Utils:\n \"\"\"Utility class containing common methods.\"\"\"\n\n @staticmethod\n def get_suid() -> str:\n \"\"\"Generate a unique session ID (SUID) using UUID4.\n\n Returns:\n A unique session ID.\n \"\"\"\n return str(uuid.uuid4().hex)"
},
{
"identifier": "DocsSummarizer",
"path": "ols/src/docs/docs_summarizer.py",
"snippet": "class DocsSummarizer:\n \"\"\"A class for summarizing documentation context.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the DocsSummarizer.\"\"\"\n self.logger = Logger(\"docs_summarizer\").logger\n\n def summarize(self, conversation, query, **kwargs) -> tuple[str, str]:\n \"\"\"Summarize the given query based on the provided conversation context.\n\n Args:\n conversation: The unique identifier for the conversation.\n query: The query to be summarized.\n kwargs: Additional keyword arguments for customization (model, verbose, etc.).\n\n Returns:\n A tuple containing the summary as a string and referenced documents as a string.\n \"\"\"\n provider = config.ols_config.summarizer_provider\n model = config.ols_config.summarizer_model\n bare_llm = LLMLoader(provider, model).llm\n\n verbose = kwargs.get(\"verbose\", \"\").lower() == \"true\"\n\n # Set up llama index to show prompting if verbose is True\n # TODO: remove this, we can't be setting global handlers, it will\n # affect other calls\n if verbose:\n llama_index.set_global_handler(\"simple\")\n\n settings_string = f\"conversation: {conversation}, query: {query}, provider: {provider}, model: {model}, verbose: {verbose}\"\n self.logger.info(f\"{conversation} call settings: {settings_string}\")\n\n summarization_template = PromptTemplate(constants.SUMMARIZATION_TEMPLATE)\n\n self.logger.info(f\"{conversation} Getting service context\")\n self.logger.info(f\"{conversation} using model: {model}\")\n\n embed_model = \"local:BAAI/bge-base-en\"\n # TODO get this from global config instead of env\n # Not a priority because embedding model probably won't be configurable in the final product\n tei_embedding_url = os.getenv(\"TEI_SERVER_URL\", None)\n if tei_embedding_url:\n self.logger.info(f\"{conversation} using TEI embedding server\")\n\n embed_model = TextEmbeddingsInference(\n model_name=constants.TEI_EMBEDDING_MODEL,\n base_url=tei_embedding_url,\n )\n\n service_context = ServiceContext.from_defaults(\n chunk_size=1024, llm=bare_llm, embed_model=embed_model, **kwargs\n )\n\n self.logger.info(\n f\"{conversation} using embed model: {service_context.embed_model!s}\"\n )\n\n # TODO get this from global config\n storage_context = StorageContext.from_defaults(\n persist_dir=constants.PRODUCT_DOCS_PERSIST_DIR\n )\n self.logger.info(f\"{conversation} Setting up index\")\n index = load_index_from_storage(\n storage_context=storage_context,\n index_id=constants.PRODUCT_INDEX,\n service_context=service_context,\n verbose=verbose,\n )\n\n self.logger.info(f\"{conversation} Setting up query engine\")\n query_engine = index.as_query_engine(\n text_qa_template=summarization_template,\n verbose=verbose,\n streaming=False,\n similarity_top_k=1,\n )\n\n self.logger.info(f\"{conversation} Submitting summarization query\")\n summary = query_engine.query(query)\n\n referenced_documents = \"\\n\".join(\n [\n source_node.node.metadata[\"file_name\"]\n for source_node in summary.source_nodes\n ]\n )\n\n self.logger.info(f\"{conversation} Summary response: {summary!s}\")\n self.logger.info(f\"{conversation} Referenced documents: {referenced_documents}\")\n\n return str(summary), referenced_documents"
},
{
"identifier": "LLMLoader",
"path": "ols/src/llms/llm_loader.py",
"snippet": "class LLMLoader:\n \"\"\"Note: This class loads the LLM backend libraries if the specific LLM is loaded.\n\n Known caveats: Currently supports a single instance/model per backend.\n\n llm_backends: a string with a supported llm backend name ('openai','ollama','tgi','watson','bam').\n params : (optional) array of parameters to override and pass to the llm backend\n\n # using the class and overriding specific parameters\n llm_backend = 'ollama'\n params = {'temperature': 0.02, 'top_p': 0.95}\n\n llm_config = LLMLoader(llm_backend=llm_backend, params=params)\n llm_chain = LLMChain(llm=llm_config.llm, prompt=prompt)\n\n \"\"\"\n\n def __init__(\n self,\n provider: Optional[str] = None,\n model: Optional[str] = None,\n url: Optional[str] = None,\n params: Optional[dict] = None,\n logger=None,\n ) -> None:\n \"\"\"Initialize loader using provided provider, model, and other parameters.\"\"\"\n self.logger = logger if logger is not None else Logger(\"llm_loader\").logger\n if provider is None:\n raise Exception(\"ERROR: Missing provider\")\n self.provider = provider\n self.url = url\n if model is None:\n raise Exception(\"ERROR: Missing model\")\n self.model = model\n\n # return empty dictionary if not defined\n self.llm_params = params if params else {}\n self.llm = None\n self._set_llm_instance()\n\n def _set_llm_instance(self):\n self.logger.debug(\n f\"[{inspect.stack()[0][3]}] Loading LLM {self.model} from {self.provider}\"\n )\n # convert to string to handle None or False definitions\n match str(self.provider).lower():\n case constants.PROVIDER_OPENAI:\n self._openai_llm_instance()\n case constants.PROVIDER_OLLAMA:\n self._ollama_llm_instance()\n case constants.PROVIDER_TGI:\n self._tgi_llm_instance()\n case constants.PROVIDER_WATSONX:\n self._watson_llm_instance()\n case constants.PROVIDER_BAM:\n self._bam_llm_instance()\n case _:\n msg = f\"ERROR: Unsupported LLM {self.provider}\"\n self.logger.error(msg)\n raise UnsupportedProvider(msg)\n\n def _openai_llm_instance(self):\n self.logger.debug(f\"[{inspect.stack()[0][3]}] Creating OpenAI LLM instance\")\n try:\n from langchain.chat_models import ChatOpenAI\n except Exception:\n self.logger.error(\n \"ERROR: Missing openai libraries. 
Skipping loading backend LLM.\"\n )\n return\n provider = config.llm_config.providers[constants.PROVIDER_OPENAI]\n model = provider.models[self.model]\n if model is None:\n raise Exception(\n f\"model {self.model} is not configured for provider {constants.PROVIDER_OPENAI}\"\n )\n params = {\n \"base_url\": provider.url\n if provider.url is not None\n else \"https://api.openai.com/v1\",\n \"api_key\": provider.credentials,\n \"model\": self.model,\n \"model_kwargs\": {}, # TODO: add model args\n \"organization\": os.environ.get(\"OPENAI_ORGANIZATION\", None),\n \"timeout\": os.environ.get(\"OPENAI_TIMEOUT\", None),\n \"cache\": None,\n \"streaming\": True,\n \"temperature\": 0.01,\n \"max_tokens\": 512,\n \"top_p\": 0.95,\n \"frequency_penalty\": 1.03,\n \"verbose\": False,\n }\n params.update(self.llm_params) # override parameters\n self.llm = ChatOpenAI(**params)\n self.logger.debug(f\"[{inspect.stack()[0][3]}] OpenAI LLM instance {self.llm}\")\n\n def _bam_llm_instance(self):\n \"\"\"BAM Research Lab.\"\"\"\n self.logger.debug(f\"[{inspect.stack()[0][3]}] BAM LLM instance\")\n try:\n # BAM Research lab\n from genai.credentials import Credentials\n from genai.extensions.langchain import LangChainInterface\n from genai.schemas import GenerateParams\n except Exception:\n self.logger.error(\n \"ERROR: Missing ibm-generative-ai libraries. Skipping loading backend LLM.\"\n )\n return\n # BAM Research lab\n provider = config.llm_config.providers[constants.PROVIDER_BAM]\n model = provider.models[self.model]\n if model is None:\n raise Exception(\n f\"model {self.model} is not configured for provider {constants.PROVIDER_BAM}\"\n )\n\n creds = Credentials(\n api_key=provider.credentials,\n api_endpoint=provider.url\n if provider.url is not None\n else \"https://bam-api.res.ibm.com\",\n )\n\n bam_params = {\n \"decoding_method\": \"sample\",\n \"max_new_tokens\": 512,\n \"min_new_tokens\": 1,\n \"random_seed\": 42,\n \"top_k\": 10,\n \"top_p\": 0.95,\n \"repetition_penalty\": 1.03,\n \"temperature\": 0.05,\n }\n bam_params.update(self.llm_params) # override parameters\n # remove none BAM params from dictionary\n for k in [\"model\", \"api_key\", \"api_endpoint\"]:\n _ = bam_params.pop(k, None)\n params = GenerateParams(**bam_params)\n\n self.llm = LangChainInterface(\n model=self.model, params=params, credentials=creds\n )\n self.logger.debug(f\"[{inspect.stack()[0][3]}] BAM LLM instance {self.llm}\")\n\n # TODO: update this to use config not direct env vars\n def _ollama_llm_instance(self):\n self.logger.debug(f\"[{inspect.stack()[0][3]}] Creating Ollama LLM instance\")\n try:\n from langchain.llms import Ollama\n except Exception:\n self.logger.error(\n \"ERROR: Missing ollama libraries. 
Skipping loading backend LLM.\"\n )\n return\n params = {\n \"base_url\": os.environ.get(\"OLLAMA_API_URL\", \"http://127.0.0.1:11434\"),\n \"model\": os.environ.get(\"OLLAMA_MODEL\", \"Mistral\"),\n \"cache\": None,\n \"temperature\": 0.01,\n \"top_k\": 10,\n \"top_p\": 0.95,\n \"repeat_penalty\": 1.03,\n \"verbose\": False,\n \"callback_manager\": CallbackManager([StreamingStdOutCallbackHandler()]),\n }\n params.update(self.llm_params) # override parameters\n self.llm = Ollama(**params)\n self.logger.debug(f\"[{inspect.stack()[0][3]}] Ollama LLM instance {self.llm}\")\n\n # TODO: update this to use config not direct env vars\n def _tgi_llm_instance(self):\n \"\"\"Note: TGI does not support specifying the model, it is an instance per model.\"\"\"\n self.logger.debug(\n f\"[{inspect.stack()[0][3]}] Creating Hugging Face TGI LLM instance\"\n )\n try:\n from langchain.llms import HuggingFaceTextGenInference\n except Exception:\n self.logger.error(\n \"ERROR: Missing HuggingFaceTextGenInference libraries. Skipping loading backend LLM.\"\n )\n return\n params = {\n \"inference_server_url\": os.environ.get(\"TGI_API_URL\", None),\n \"model_kwargs\": {}, # TODO: add model args\n \"max_new_tokens\": 512,\n \"cache\": None,\n \"temperature\": 0.01,\n \"top_k\": 10,\n \"top_p\": 0.95,\n \"repetition_penalty\": 1.03,\n \"streaming\": True,\n \"verbose\": False,\n \"callback_manager\": CallbackManager([StreamingStdOutCallbackHandler()]),\n }\n params.update(self.llm_params) # override parameters\n self.llm = HuggingFaceTextGenInference(**params)\n self.logger.debug(\n f\"[{inspect.stack()[0][3]}] Hugging Face TGI LLM instance {self.llm}\"\n )\n\n # TODO: update this to use config not direct env vars\n def _watson_llm_instance(self):\n self.logger.debug(f\"[{inspect.stack()[0][3]}] Watson LLM instance\")\n # WatsonX (requires WansonX libraries)\n try:\n from ibm_watson_machine_learning.foundation_models import Model\n from ibm_watson_machine_learning.foundation_models.extensions.langchain import (\n WatsonxLLM,\n )\n from ibm_watson_machine_learning.metanames import (\n GenTextParamsMetaNames as GenParams,\n )\n except Exception:\n self.logger.error(\n \"ERROR: Missing ibm_watson_machine_learning libraries. 
Skipping loading backend LLM.\"\n )\n return\n # WatsonX uses different keys\n creds = {\n # example from https://heidloff.net/article/watsonx-langchain/\n \"url\": self.llm_params.get(\"url\")\n if self.llm_params.get(\"url\") is not None\n else os.environ.get(\"WATSON_API_URL\", None),\n \"apikey\": self.llm_params.get(\"apikey\")\n if self.llm_params.get(\"apikey\") is not None\n else os.environ.get(\"WATSON_API_KEY\", None),\n }\n # WatsonX uses different mechanism for defining parameters\n params = {\n GenParams.DECODING_METHOD: self.llm_params.get(\"decoding_method\", \"sample\"),\n GenParams.MIN_NEW_TOKENS: self.llm_params.get(\"min_new_tokens\", 1),\n GenParams.MAX_NEW_TOKENS: self.llm_params.get(\"max_new_tokens\", 512),\n GenParams.RANDOM_SEED: self.llm_params.get(\"random_seed\", 42),\n GenParams.TEMPERATURE: self.llm_params.get(\"temperature\", 0.05),\n GenParams.TOP_K: self.llm_params.get(\"top_k\", 10),\n GenParams.TOP_P: self.llm_params.get(\"top_p\", 0.95),\n # https://www.ibm.com/docs/en/watsonx-as-a-service?topic=models-parameters\n GenParams.REPETITION_PENALTY: self.llm_params.get(\n \"repeatition_penallty\", 1.03\n ),\n }\n # WatsonX uses different parameter names\n llm_model = Model(\n model_id=self.llm_params.get(\n \"model_id\", os.environ.get(\"WATSON_MODEL\", None)\n ),\n credentials=creds,\n params=params,\n project_id=self.llm_params.get(\n \"project_id\", os.environ.get(\"WATSON_PROJECT_ID\", None)\n ),\n )\n self.llm = WatsonxLLM(model=llm_model)\n self.logger.debug(f\"[{inspect.stack()[0][3]}] Watson LLM instance {self.llm}\")\n\n def status(self):\n \"\"\"Provide LLM schema as a string containing formatted and indented JSON.\"\"\"\n import json\n\n return json.dumps(self.llm.schema_json, indent=4)"
},
{
"identifier": "QuestionValidator",
"path": "ols/src/query_helpers/question_validator.py",
"snippet": "class QuestionValidator:\n \"\"\"This class is responsible for validating questions and providing one-word responses.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initializes the `QuestionValidator` instance.\"\"\"\n self.logger = Logger(\"question_validator\").logger\n\n def validate_question(\n self, conversation: str, query: str, verbose: bool = False\n ) -> list[str]:\n \"\"\"Validates a question and provides a one-word response.\n\n Args:\n conversation: The identifier for the conversation or task context.\n query: The question to be validated.\n verbose: If `LLMChain` should be verbose. Defaults to `False`.\n\n Returns:\n A list of one-word responses.\n \"\"\"\n model = config.ols_config.validator_model\n provider = config.ols_config.validator_provider\n\n settings_string = f\"conversation: {conversation}, query: {query}, provider: {provider}, model: {model}, verbose: {verbose}\"\n self.logger.info(f\"{conversation} call settings: {settings_string}\")\n\n prompt_instructions = PromptTemplate.from_template(\n constants.QUESTION_VALIDATOR_PROMPT_TEMPLATE\n )\n\n self.logger.info(f\"{conversation} Validating query\")\n self.logger.info(f\"{conversation} using model: {model}\")\n\n bare_llm = LLMLoader(\n provider, model, params={\"min_new_tokens\": 1, \"max_new_tokens\": 4}\n ).llm\n\n llm_chain = LLMChain(llm=bare_llm, prompt=prompt_instructions, verbose=verbose)\n\n task_query = prompt_instructions.format(query=query)\n\n self.logger.info(f\"{conversation} task query: {task_query}\")\n\n response = llm_chain(inputs={\"query\": query})\n clean_response = str(response[\"text\"]).strip()\n\n self.logger.info(f\"{conversation} response: {clean_response}\")\n\n if response[\"text\"] not in [\"INVALID,NOYAML\", \"VALID,NOYAML\", \"VALID,YAML\"]:\n raise ValueError(\"Returned response did not match the expected format\")\n\n # will return an array:\n # [INVALID,NOYAML]\n # [VALID,NOYAML]\n # [VALID,YAML]\n return clean_response.split(\",\")"
},
{
"identifier": "YamlGenerator",
"path": "ols/src/query_helpers/yaml_generator.py",
"snippet": "class YamlGenerator:\n \"\"\"This class is responsible for generating YAML responses to user requests.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initializes the `YamlGenerator` instance.\"\"\"\n self.logger = Logger(\"yaml_generator\").logger\n\n def generate_yaml(\n self, conversation_id: str, query: str, history: str | None = None, **kwargs\n ) -> str:\n \"\"\"Generates YAML response to a user request.\n\n Args:\n conversation_id: The identifier for the conversation or task context.\n query: The user request.\n history: The history of the conversation (if available).\n **kwargs: Additional keyword arguments for customization.\n\n Returns:\n The generated YAML response.\n \"\"\"\n model = config.ols_config.validator_model\n provider = config.ols_config.validator_provider\n\n verbose = kwargs.get(\"verbose\", \"\").lower() == \"true\"\n settings_string = f\"conversation: {conversation_id}, query: {query}, provider: {provider}, model: {model}, verbose: {verbose}\"\n self.logger.info(f\"{conversation_id} call settings: {settings_string}\")\n self.logger.info(f\"{conversation_id} using model: {model}\")\n\n bare_llm = LLMLoader(provider, model).llm\n\n if history:\n prompt_instructions = PromptTemplate.from_template(\n constants.YAML_GENERATOR_WITH_HISTORY_PROMPT_TEMPLATE\n )\n task_query = prompt_instructions.format(query=query, history=history)\n else:\n prompt_instructions = PromptTemplate.from_template(\n constants.YAML_GENERATOR_PROMPT_TEMPLATE\n )\n task_query = prompt_instructions.format(query=query)\n\n self.logger.info(f\"{conversation_id} task query: {task_query}\")\n llm_chain = LLMChain(llm=bare_llm, verbose=verbose, prompt=prompt_instructions)\n response = llm_chain(inputs={\"query\": query, \"history\": history})\n self.logger.info(f\"{conversation_id} response:\\n{response['text']}\")\n return response[\"text\"]"
},
{
"identifier": "config",
"path": "ols/utils/config.py",
"snippet": "def load_empty_config() -> None:\ndef load_config_from_env() -> None:"
}
] | from fastapi import APIRouter, HTTPException, status
from ols.app import constants
from ols.app.models.models import LLMRequest
from ols.app.utils import Utils
from ols.src.docs.docs_summarizer import DocsSummarizer
from ols.src.llms.llm_loader import LLMLoader
from ols.src.query_helpers.question_validator import QuestionValidator
from ols.src.query_helpers.yaml_generator import YamlGenerator
from ols.utils import config | 5,517 | """Handlers for all OLS-related REST API endpoints."""
router = APIRouter(prefix="/ols", tags=["ols"])
@router.post("")
def ols_request(llm_request: LLMRequest) -> LLMRequest:
"""Handle requests for the OLS endpoint.
Args:
llm_request: The request containing a query and conversation ID.
Returns:
Response containing the processed information.
"""
logger = config.default_logger
# Initialize variables
previous_input = None
conversation = llm_request.conversation_id
# Generate a new conversation ID if not provided
if conversation is None:
conversation = Utils.get_suid()
logger.info(f"{conversation} New conversation")
else:
previous_input = config.conversation_cache.get(conversation)
logger.info(f"{conversation} Previous conversation input: {previous_input}")
llm_response = LLMRequest(query=llm_request.query, conversation_id=conversation)
# Log incoming request
logger.info(f"{conversation} Incoming request: {llm_request.query}")
# Validate the query
question_validator = QuestionValidator()
validation_result = question_validator.validate_question(
conversation, llm_request.query
)
if validation_result[0] == constants.INVALID:
logger.info(f"{conversation} Question is not about k8s/ocp, rejecting")
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail={
"response": "Sorry, I can only answer questions about "
"OpenShift and Kubernetes. This does not look "
"like something I know how to handle."
},
)
if validation_result[0] == constants.VALID:
logger.info(f"{conversation} Question is about k8s/ocp")
question_type = validation_result[1]
        # check if the question type is one of the known categories
        if question_type not in {constants.NOYAML, constants.YAML}:
            # an unknown question type was detected
logger.error(f"Unknown question type {question_type}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"response": "Internal server error. Please try again."},
)
if question_type == constants.NOYAML:
logger.info(
f"{conversation} Question is not about yaml, sending for generic info"
)
# Summarize documentation
| """Handlers for all OLS-related REST API endpoints."""
router = APIRouter(prefix="/ols", tags=["ols"])
@router.post("")
def ols_request(llm_request: LLMRequest) -> LLMRequest:
"""Handle requests for the OLS endpoint.
Args:
llm_request: The request containing a query and conversation ID.
Returns:
Response containing the processed information.
"""
logger = config.default_logger
# Initialize variables
previous_input = None
conversation = llm_request.conversation_id
# Generate a new conversation ID if not provided
if conversation is None:
conversation = Utils.get_suid()
logger.info(f"{conversation} New conversation")
else:
previous_input = config.conversation_cache.get(conversation)
logger.info(f"{conversation} Previous conversation input: {previous_input}")
llm_response = LLMRequest(query=llm_request.query, conversation_id=conversation)
# Log incoming request
logger.info(f"{conversation} Incoming request: {llm_request.query}")
# Validate the query
question_validator = QuestionValidator()
validation_result = question_validator.validate_question(
conversation, llm_request.query
)
if validation_result[0] == constants.INVALID:
logger.info(f"{conversation} Question is not about k8s/ocp, rejecting")
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail={
"response": "Sorry, I can only answer questions about "
"OpenShift and Kubernetes. This does not look "
"like something I know how to handle."
},
)
if validation_result[0] == constants.VALID:
logger.info(f"{conversation} Question is about k8s/ocp")
question_type = validation_result[1]
        # check if the question type is one of the known categories
        if question_type not in {constants.NOYAML, constants.YAML}:
            # an unknown question type was detected
logger.error(f"Unknown question type {question_type}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"response": "Internal server error. Please try again."},
)
if question_type == constants.NOYAML:
logger.info(
f"{conversation} Question is not about yaml, sending for generic info"
)
# Summarize documentation | docs_summarizer = DocsSummarizer() | 3 | 2023-11-08 06:29:41+00:00 | 8k |
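The record above ends with the next_line docs_summarizer = DocsSummarizer(). Below is a hedged sketch of how the NOYAML branch might continue from there, based only on the DocsSummarizer.summarize signature quoted in the record's context (it returns a (summary, referenced_documents) tuple); the assignment to llm_response.response is an assumption, not the repository's code.

# Hypothetical continuation of the NOYAML branch; the summarize signature is taken
# from the DocsSummarizer snippet above, the rest is an assumption.
docs_summarizer = DocsSummarizer()
summary, referenced_documents = docs_summarizer.summarize(
    conversation, llm_request.query
)
llm_response.response = summary  # assumed: the summary is returned to the caller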
NicolasZucchet/Online-learning-LR-dependencies | online_lru/train.py | [
{
"identifier": "create_train_state",
"path": "online_lru/train_helpers.py",
"snippet": "def create_train_state(\n model,\n rng,\n retrieval,\n in_dim=1,\n bsz=128,\n seq_len=784,\n weight_decay=0.01,\n padded=False,\n opt_config=\"standard\",\n rec_lr=1e-3,\n lr=1e-3,\n n_accumulation_steps=1,\n):\n \"\"\"\n Initializes the training state using optax\n \"\"\"\n\n if padded:\n if retrieval:\n # For retrieval tasks we have two different sets of \"documents\"\n dummy_input = (jnp.ones((2 * bsz, seq_len, in_dim)), jnp.ones(2 * bsz))\n else:\n # removed length from inputs as masking is taken care in loss\n # dummy_input = (jnp.ones((bsz, seq_len, in_dim)), jnp.ones(bsz))\n dummy_input = jnp.ones((bsz, seq_len, in_dim))\n else:\n dummy_input = jnp.ones((bsz, seq_len, in_dim))\n\n init_rng, dropout_rng = jax.random.split(rng, num=2)\n variables = model.init({\"params\": init_rng, \"dropout\": dropout_rng}, dummy_input)\n\n params = variables[\"params\"]\n init_states = {\n \"traces\": variables.get(\"traces\"),\n \"perturbations\": variables.get(\"perturbations\"),\n }\n\n A_params = [\"theta\", \"nu\", \"A\", \"norm\"] # NOTE: no weight decay on norm params\n B_params = [\"B_re\", \"B_im\", \"B\"]\n C_params = [\"C_re\", \"C_im\", \"C\"]\n gamma_params = [\"gamma_log\"]\n # By default:\n # A, B, gamma (rec): rec learning rate and no weight decay\n # C: global learning rate and weight decay\n if opt_config in [\"standard\"]:\n print(\"configuring standard optimization setup\")\n rec_fn = map_nested_fn(\n lambda k, _: \"rec\"\n if k in A_params + B_params + gamma_params\n else (\"none\" if k in [] else \"regular\")\n )\n tx = optax.multi_transform(\n {\n \"none\": optax.inject_hyperparams(optax.sgd)(learning_rate=0.0),\n \"rec\": optax.inject_hyperparams(optax.adam)(learning_rate=rec_lr),\n \"regular\": optax.inject_hyperparams(optax.adamw)(\n learning_rate=lr, weight_decay=weight_decay\n ),\n },\n rec_fn,\n )\n elif opt_config in [\"Bdecay\"]:\n # Apply weight decay to B (still with rec learning rate)\n print(\"configuring optimization with B in AdamW setup\")\n rec_fn = map_nested_fn(\n lambda k, _: \"rec\"\n if k in A_params + gamma_params\n else (\"none\" if k in B_params else \"regular\")\n )\n tx = optax.multi_transform(\n {\n \"none\": optax.inject_hyperparams(optax.adamw)(\n learning_rate=rec_lr, weight_decay=weight_decay\n ),\n \"rec\": optax.inject_hyperparams(optax.adam)(learning_rate=rec_lr),\n \"regular\": optax.inject_hyperparams(optax.adamw)(\n learning_rate=lr, weight_decay=weight_decay\n ),\n },\n rec_fn,\n )\n\n elif opt_config in [\"Bfast_and_decay\"]:\n # Apply weight decay and global lr to B\n print(\"configuring optimization with B in AdamW setup with lr\")\n rec_fn = map_nested_fn(\n lambda k, _: \"rec\"\n if k in A_params + gamma_params\n else (\"none\" if k in [] else \"regular\")\n )\n tx = optax.multi_transform(\n {\n \"none\": optax.inject_hyperparams(optax.adamw)(learning_rate=0.0),\n \"rec\": optax.inject_hyperparams(optax.adam)(learning_rate=rec_lr),\n \"regular\": optax.inject_hyperparams(optax.adamw)(\n learning_rate=lr, weight_decay=weight_decay\n ),\n },\n rec_fn,\n )\n elif opt_config in [\"Cslow_and_nodecay\"]:\n # No weight decay for C and rec learning rate\n print(\"configuring optimization with C not in AdamW setup\")\n rec_fn = map_nested_fn(\n lambda k, _: \"rec\"\n if k in A_params + B_params + C_params + gamma_params\n else (\"none\" if k in [] else \"regular\")\n )\n tx = optax.multi_transform(\n {\n \"none\": optax.inject_hyperparams(optax.sgd)(learning_rate=0.0),\n \"rec\": 
optax.inject_hyperparams(optax.adam)(learning_rate=rec_lr),\n \"regular\": optax.inject_hyperparams(optax.adamw)(\n learning_rate=lr, weight_decay=weight_decay\n ),\n },\n rec_fn,\n )\n\n # Add steps aggragator\n tx = optax.MultiSteps(tx, n_accumulation_steps)\n\n def fn_is_complex(x):\n return x.dtype in [jnp.complex64, jnp.complex128]\n\n param_sizes = map_nested_fn(lambda k, param: param.size * (2 if fn_is_complex(param) else 1))(\n params\n )\n print(f\"[*] Trainable Parameters: {sum(jax.tree_util.tree_leaves(param_sizes))}\")\n\n return train_state.TrainState.create(apply_fn=model.apply, params=params, tx=tx), init_states"
},
{
"identifier": "reduce_lr_on_plateau",
"path": "online_lru/train_helpers.py",
"snippet": "def reduce_lr_on_plateau(input, factor=0.2, patience=20, lr_min=1e-6):\n lr, rec_lr, count, new_loss, opt_loss = input\n if new_loss < opt_loss:\n count = 0\n opt_loss = new_loss\n else:\n count += 1\n\n if count > patience:\n lr = factor * lr\n rec_lr = factor * rec_lr\n count = 0\n\n if lr < lr_min:\n lr = lr_min\n if rec_lr < lr_min:\n rec_lr = lr_min\n\n return (\n lr,\n rec_lr,\n count,\n )"
},
{
"identifier": "linear_warmup",
"path": "online_lru/train_helpers.py",
"snippet": "def linear_warmup(step, base_lr, end_step, lr_min=None):\n return base_lr * (step + 1) / end_step"
},
{
"identifier": "cosine_annealing",
"path": "online_lru/train_helpers.py",
"snippet": "def cosine_annealing(step, base_lr, end_step, lr_min=1e-6):\n # https://github.com/deepmind/optax/blob/master/optax/_src/schedule.py#L207#L240\n count = jnp.minimum(step, end_step)\n cosine_decay = 0.5 * (1 + jnp.cos(jnp.pi * count / end_step))\n decayed = (base_lr - lr_min) * cosine_decay + lr_min\n return decayed"
},
{
"identifier": "constant_lr",
"path": "online_lru/train_helpers.py",
"snippet": "def constant_lr(step, base_lr, end_step, lr_min=None):\n return base_lr"
},
{
"identifier": "train_epoch",
"path": "online_lru/train_helpers.py",
"snippet": "def train_epoch(\n state,\n init_states,\n rng,\n model,\n bptt_model,\n trainloader,\n seq_len,\n in_dim,\n lr_params,\n log_n_batches,\n log_loss_every,\n):\n \"\"\"\n Training function for an epoch that loops over batches.\n \"\"\"\n batch_losses = []\n\n decay_function, rec_lr, lr, step, end_step, opt_config, lr_min = lr_params\n\n for i, batch in enumerate(tqdm(trainloader)):\n inputs, labels, real_lengths = prep_batch(batch, seq_len, in_dim)\n mask = create_mask(inputs, real_lengths)\n rng, drop_rng = jax.random.split(rng)\n # Compute parameter metrics once every epoch\n if i == 0:\n params_metrics = get_params_metrics(deepcopy(state.params))\n # Aggregate gradient metrics over first log_n_batches batches\n if log_n_batches > 0 and i < log_n_batches:\n init_states, true_grad, est_grad = compute_grads(\n state,\n init_states,\n rng,\n inputs,\n labels,\n mask,\n model,\n bptt_model,\n )\n curr_grad_metrics = get_grad_metrics(est_grad, true_grad)\n if i == 0:\n grad_metrics = curr_grad_metrics\n else:\n grad_metrics = jax.tree_util.tree_map(\n lambda old, new: (i * old + new) / (i + 1), grad_metrics, curr_grad_metrics\n )\n # Update the model\n state, loss, init_states = train_step(\n state, init_states, drop_rng, inputs, labels, mask, model\n )\n batch_losses.append(loss)\n lr_params = (decay_function, rec_lr, lr, step, end_step, opt_config, lr_min)\n state, step = update_learning_rate_per_step(lr_params, state)\n\n if log_loss_every > 0:\n if i % log_loss_every == 0:\n wandb.log({\"train_loss\": loss})\n\n # Merge and format metrics in a wandb friendly way\n if log_n_batches == 0:\n metrics = format_metrics(params_metrics)\n else:\n metrics = format_metrics(merge_metrics([params_metrics, grad_metrics]))\n\n # Return average loss over batches\n return (state, jnp.mean(jnp.array(batch_losses)), step, init_states, metrics)"
},
{
"identifier": "validate",
"path": "online_lru/train_helpers.py",
"snippet": "def validate(state, model, testloader, seq_len, in_dim):\n \"\"\"Validation function that loops over batches\"\"\"\n losses = []\n losses_unreduced = []\n accs = []\n for i, batch in enumerate(tqdm(testloader)):\n inputs, labels, real_lengths = prep_batch(batch, seq_len, in_dim)\n mask = create_mask(inputs, real_lengths)\n loss, acc, logits, loss_unreduced = eval_step(inputs, labels, mask, state, model)\n\n losses.append(loss)\n losses_unreduced.append(loss_unreduced)\n accs.append(acc)\n\n losses = jnp.stack(losses)\n losses_unreduced = jnp.concatenate(losses_unreduced)\n accs = jax.tree_util.tree_map(lambda *args: jnp.concatenate(args), *accs)\n\n average_loss = jnp.mean(losses)\n losses_unreduced = jnp.mean(losses_unreduced, axis=0) # Average over batch, not time\n average_accs = jax.tree_util.tree_map(lambda x: jnp.mean(x), accs)\n\n time_steps_to_log = [2**i for i in range(1, int(math.log2(seq_len)) + 1)]\n average_loss_per_time_step = {\n f\"Loss (t={t})\": losses_unreduced[t - 1] for t in time_steps_to_log\n }\n\n return average_loss, average_accs, average_loss_per_time_step"
},
{
"identifier": "Datasets",
"path": "online_lru/dataloading.py",
"snippet": "DEFAULT_CACHE_DIR_ROOT = Path(\"./cache_dir/\")\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = dataset_obj.l_max\n IN_DIM = 135 # We should probably stop this from being hard-coded.\n TRAIN_SIZE = len(dataset_obj.dataset_train)\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = dataset_obj.l_max\n IN_DIM = 20\n TRAIN_SIZE = len(dataset_obj.dataset_train)\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = dataset_obj.dataset_train.tensors[0].shape[1]\n IN_DIM = dataset_obj.d_input\n TRAIN_SIZE = dataset_obj.dataset_train.tensors[0].shape[0]\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = dataset_obj.dataset_train.tensors[0].shape[1]\n IN_DIM = dataset_obj.d_input\n TRAIN_SIZE = dataset_obj.dataset_train.tensors[0].shape[0]\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = 32 * 32\n IN_DIM = 1\n TRAIN_SIZE = len(dataset_obj.dataset_train)\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = dataset_obj.l_max\n IN_DIM = len(dataset_obj.vocab)\n TRAIN_SIZE = len(dataset_obj.dataset_train)\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = 32 * 32\n IN_DIM = 3\n TRAIN_SIZE = len(dataset_obj.dataset_train)\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = 28 * 28\n IN_DIM = 1\n TRAIN_SIZE = len(dataset_obj.dataset_train)\n N_CLASSES = dataset_obj.d_output\n SEQ_LENGTH = 28 * 28\n IN_DIM = 1\n TRAIN_SIZE = len(dataset_obj.dataset_train)\n OUT_DIM = dataset_obj.out_dim\n SEQ_LENGTH = dataset_obj.seq_length_in\n IN_DIM = dataset_obj.in_dim\n TRAIN_SIZE = kwargs[\"train_size\"]\ndef custom_loader(cache_dir: str, bsz: int = 50, seed: int = 42) -> ReturnType:\ndef make_data_loader(\n dset,\n dobj,\n seed: int,\n batch_size: int = 128,\n shuffle: bool = True,\n drop_last: bool = True,\n collate_fn: callable = None,\n):\ndef create_lra_imdb_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT, bsz: int = 50, seed: int = 42\n) -> ReturnType:\ndef create_lra_listops_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT, bsz: int = 50, seed: int = 42\n) -> ReturnType:\ndef create_lra_path32_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT, bsz: int = 50, seed: int = 42\n) -> ReturnType:\ndef create_lra_pathx_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT, bsz: int = 50, seed: int = 42\n) -> ReturnType:\ndef create_lra_image_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT, seed: int = 42, bsz: int = 128\n) -> ReturnType:\ndef create_lra_aan_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT,\n bsz: int = 50,\n seed: int = 42,\n) -> ReturnType:\ndef create_cifar_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT, seed: int = 42, bsz: int = 128\n) -> ReturnType:\ndef create_mnist_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT, seed: int = 42, bsz: int = 128\n) -> ReturnType:\ndef create_pmnist_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT, seed: int = 42, bsz: int = 128\n) -> ReturnType:\ndef create_copy_pad_classification_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT,\n pattern_length: int = 10,\n train_samples: int = 100000,\n seed: int = 42,\n bsz: int = 128,\n) -> ReturnType:\ndef create_enwik9_dataset(\n cache_dir: Union[str, Path] = DEFAULT_CACHE_DIR_ROOT,\n seed: int = 42,\n bsz: int = 128,\n seq_len: int = 8,\n train_samples: int = None,\n):"
},
{
"identifier": "BatchClassificationModel",
"path": "online_lru/seq_model.py",
"snippet": "class StackedEncoder(nn.Module):\nclass ClassificationModel(nn.Module):\nclass RetrievalDecoder(nn.Module):\n def setup(self):\n def __call__(self, x):\n def update_gradients(self, grad):\n def setup(self):\n def decode(self, x, var=None):\n def __call__(self, x):\n def cumulative_mean(x):\n def update_gradients(self, grad):\ndef masked_meanpool(x, lengths):\n def setup(self):\n def __call__(self, x):\n L = x.shape[0]"
},
{
"identifier": "init_layer",
"path": "online_lru/rec.py",
"snippet": "def init_layer(layer_cls, **kwargs):\n if layer_cls == \"LRU\":\n layer = LRU\n if layer_cls == \"RNN\":\n layer = RNN\n if layer_cls == \"GRU\":\n layer = GRU\n return partial(layer, **kwargs)"
}
] | from functools import partial
from jax import random
from .train_helpers import (
create_train_state,
reduce_lr_on_plateau,
linear_warmup,
cosine_annealing,
constant_lr,
train_epoch,
validate,
)
from .dataloading import Datasets
from .seq_model import BatchClassificationModel
from .rec import init_layer
import wandb | 4,947 | Main function to train over a certain number of epochs
"""
best_test_loss = 100000000
if args.USE_WANDB:
# Make wandb config dictionary
wandb.init(
project=args.wandb_project,
job_type="model_training",
config=vars(args),
entity=args.wandb_entity,
)
else:
wandb.init(mode="offline")
    # Set the recurrent learning rate as a function of the base learning rate
lr = args.lr_base
rec_lr = args.lr_factor * lr
# Set randomness...
print("[*] Setting Randomness...")
key = random.PRNGKey(args.jax_seed)
init_rng, train_rng = random.split(key, num=2)
# Close over additional dataset-specific kwargs
create_dataset_fn = Datasets[args.dataset]
if args.dataset == "copy-pad-classification":
create_dataset_fn = partial(
create_dataset_fn,
pattern_length=args.copy_pattern_length,
train_samples=args.copy_train_samples,
)
elif args.dataset == "enwik9":
create_dataset_fn = partial(
create_dataset_fn,
seq_len=args.enwik9_seq_len,
train_samples=args.enwik9_train_samples,
)
# Dataset dependent logic
if args.dataset in [
"imdb-classification",
"listops-classification",
"aan-classification",
]:
padded = True
if args.dataset in ["aan-classification"]:
            # Use retrieval model for document matching
retrieval = True
print("Using retrieval model for document matching")
else:
retrieval = False
else:
padded = False
retrieval = False
# Create dataset...
init_rng, key = random.split(init_rng, num=2)
(
trainloader,
valloader,
testloader,
aux_dataloaders,
n_classes,
seq_len,
in_dim,
train_size,
) = create_dataset_fn(args.dir_name, seed=args.jax_seed, bsz=args.bsz)
print(f"[*] Starting training on `{args.dataset}` =>> Initializing...")
    # arguments specific to the LRU or RNN layer classes
additional_arguments = {}
if args.layer_cls == "LRU":
additional_arguments["r_min"] = args.r_min
additional_arguments["r_max"] = args.r_max
if args.layer_cls == "RNN":
additional_arguments["activation"] = args.rnn_activation_fn
additional_arguments["scaling_hidden"] = args.rnn_scaling_hidden
elif args.layer_cls == "GRU":
assert args.d_hidden == args.d_model
rec_train = init_layer(
layer_cls=args.layer_cls,
d_hidden=args.d_hidden,
d_model=args.d_model,
seq_length=seq_len,
training_mode=args.training_mode,
**additional_arguments,
)
rec_val = init_layer(
layer_cls=args.layer_cls,
d_hidden=args.d_hidden,
d_model=args.d_model,
seq_length=seq_len,
training_mode="bptt", # bptt mode so rec does not keep in memory states and traces
**additional_arguments,
)
model_cls = partial(
BatchClassificationModel,
rec_type=args.layer_cls,
d_input=in_dim,
d_output=n_classes,
d_model=args.d_model,
n_layers=args.n_layers,
seq_length=seq_len,
padded=padded,
activation=args.activation_fn,
readout=args.readout,
dropout=args.p_dropout,
mode=args.mode,
prenorm=args.prenorm,
multidim=1 + (args.dataset == "copy-pad-classification"),
) # signature: (bool) training -> BatchClassificationModel
model = model_cls(rec=rec_train, training_mode=args.training_mode, training=True)
bptt_model = model_cls(rec=rec_val, training_mode="bptt", training=True)
val_model = model_cls(rec=rec_val, training_mode="bptt", training=False)
# initialize training state (optax) and internal states of the model
|
def train(args):
"""
Main function to train over a certain number of epochs
"""
best_test_loss = 100000000
if args.USE_WANDB:
# Make wandb config dictionary
wandb.init(
project=args.wandb_project,
job_type="model_training",
config=vars(args),
entity=args.wandb_entity,
)
else:
wandb.init(mode="offline")
    # Set the recurrent learning rate as a function of the base learning rate
lr = args.lr_base
rec_lr = args.lr_factor * lr
# Set randomness...
print("[*] Setting Randomness...")
key = random.PRNGKey(args.jax_seed)
init_rng, train_rng = random.split(key, num=2)
# Close over additional dataset-specific kwargs
create_dataset_fn = Datasets[args.dataset]
if args.dataset == "copy-pad-classification":
create_dataset_fn = partial(
create_dataset_fn,
pattern_length=args.copy_pattern_length,
train_samples=args.copy_train_samples,
)
elif args.dataset == "enwik9":
create_dataset_fn = partial(
create_dataset_fn,
seq_len=args.enwik9_seq_len,
train_samples=args.enwik9_train_samples,
)
# Dataset dependent logic
if args.dataset in [
"imdb-classification",
"listops-classification",
"aan-classification",
]:
padded = True
if args.dataset in ["aan-classification"]:
            # Use retrieval model for document matching
retrieval = True
print("Using retrieval model for document matching")
else:
retrieval = False
else:
padded = False
retrieval = False
# Create dataset...
init_rng, key = random.split(init_rng, num=2)
(
trainloader,
valloader,
testloader,
aux_dataloaders,
n_classes,
seq_len,
in_dim,
train_size,
) = create_dataset_fn(args.dir_name, seed=args.jax_seed, bsz=args.bsz)
print(f"[*] Starting training on `{args.dataset}` =>> Initializing...")
    # arguments specific to the LRU or RNN layer classes
additional_arguments = {}
if args.layer_cls == "LRU":
additional_arguments["r_min"] = args.r_min
additional_arguments["r_max"] = args.r_max
if args.layer_cls == "RNN":
additional_arguments["activation"] = args.rnn_activation_fn
additional_arguments["scaling_hidden"] = args.rnn_scaling_hidden
elif args.layer_cls == "GRU":
assert args.d_hidden == args.d_model
rec_train = init_layer(
layer_cls=args.layer_cls,
d_hidden=args.d_hidden,
d_model=args.d_model,
seq_length=seq_len,
training_mode=args.training_mode,
**additional_arguments,
)
rec_val = init_layer(
layer_cls=args.layer_cls,
d_hidden=args.d_hidden,
d_model=args.d_model,
seq_length=seq_len,
training_mode="bptt", # bptt mode so rec does not keep in memory states and traces
**additional_arguments,
)
model_cls = partial(
BatchClassificationModel,
rec_type=args.layer_cls,
d_input=in_dim,
d_output=n_classes,
d_model=args.d_model,
n_layers=args.n_layers,
seq_length=seq_len,
padded=padded,
activation=args.activation_fn,
readout=args.readout,
dropout=args.p_dropout,
mode=args.mode,
prenorm=args.prenorm,
multidim=1 + (args.dataset == "copy-pad-classification"),
) # signature: (bool) training -> BatchClassificationModel
model = model_cls(rec=rec_train, training_mode=args.training_mode, training=True)
bptt_model = model_cls(rec=rec_val, training_mode="bptt", training=True)
val_model = model_cls(rec=rec_val, training_mode="bptt", training=False)
# initialize training state (optax) and internal states of the model | state, init_states = create_train_state( | 0 | 2023-11-01 13:18:32+00:00 | 8k |
zhaohengz/CAILA | test.py | [
{
"identifier": "DATA_FOLDER",
"path": "flags.py",
"snippet": "DATA_FOLDER = \"./all_data\""
},
{
"identifier": "dataset",
"path": "data/dataset.py",
"snippet": "class ImageLoader:\nclass CompositionDataset(Dataset):\n def __init__(self, root):\n def __call__(self, img):\ndef dataset_transform(phase, norm_family ='clip'):\ndef filter_data(all_data, pairs_gt, topk = 5):\n def __init__(\n self,\n root,\n phase,\n dataset=None,\n split = 'compositional-split',\n norm_family = 'imagenet',\n subset = False,\n pair_dropout = 0.0,\n return_images = False,\n train_only = False,\n open_world=False\n ):\n def build_data_dict(self, data):\n def insert(map, key, value):\n def parse_split(self):\n def parse_pairs(pair_list):\n def get_split_info(self):\n def get_dict_data(self, data, attrs, objs, pairs):\n def reset_dropout(self):\n def sample_negative(self, attr, obj):\n def sample_mixup(self, attr, obj):\n def sample_affordance(self, attr, obj):\n def sample_train_affordance(self, attr, obj, map, target):\n def set_p(self, p_mixup, p_shift, p_obj_shift):\n def sample_contrastive(self, map, key, num_neg):\n def __getitem__(self, index):\n def __len__(self):"
},
{
"identifier": "Evaluator",
"path": "models/common.py",
"snippet": "class Evaluator:\n\n def __init__(self, dset, model):\n\n self.dset = dset\n\n # Convert text pairs to idx tensors: [('sliced', 'apple'), ('ripe', 'apple'), ...] --> torch.LongTensor([[0,1],[1,1], ...])\n pairs = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in dset.pairs]\n self.train_pairs = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in dset.train_pairs]\n self.pairs = torch.LongTensor(pairs)\n\n # Mask over pairs that occur in closed world\n # Select set based on phase\n if dset.phase == 'train':\n print('Evaluating with train pairs')\n test_pair_set = set(dset.train_pairs)\n test_pair_gt = set(dset.train_pairs)\n elif dset.phase == 'val':\n print('Evaluating with validation pairs')\n test_pair_set = set(dset.val_pairs + dset.train_pairs)\n test_pair_gt = set(dset.val_pairs)\n else:\n print('Evaluating with test pairs')\n test_pair_set = set(dset.test_pairs + dset.train_pairs)\n test_pair_gt = set(dset.test_pairs)\n\n self.test_pair_dict = [(dset.attr2idx[attr], dset.obj2idx[obj]) for attr, obj in test_pair_gt]\n self.test_pair_dict = dict.fromkeys(self.test_pair_dict, 0)\n\n # dict values are pair val, score, total\n for attr, obj in test_pair_gt:\n pair_val = dset.pair2idx[(attr,obj)]\n key = (dset.attr2idx[attr], dset.obj2idx[obj])\n self.test_pair_dict[key] = [pair_val, 0, 0]\n\n if dset.open_world:\n masks = [1 for _ in dset.pairs]\n else:\n masks = [1 if pair in test_pair_set else 0 for pair in dset.pairs]\n\n self.closed_mask = torch.BoolTensor(masks)\n # Mask of seen concepts\n seen_pair_set = set(dset.train_pairs)\n mask = [1 if pair in seen_pair_set else 0 for pair in dset.pairs]\n self.seen_mask = torch.BoolTensor(mask)\n\n # Object specific mask over which pairs occur in the object oracle setting\n oracle_obj_mask = []\n for _obj in dset.objs:\n mask = [1 if _obj == obj else 0 for attr, obj in dset.pairs]\n oracle_obj_mask.append(torch.BoolTensor(mask))\n self.oracle_obj_mask = torch.stack(oracle_obj_mask, 0)\n\n # Decide if the model under evaluation is a manifold model or not\n self.score_model = self.score_manifold_model\n\n # Generate mask for each settings, mask scores, and get prediction labels\n def generate_predictions(self, scores, obj_truth, bias = 0.0, topk = 5): # (Batch, #pairs)\n '''\n Inputs\n scores: Output scores\n obj_truth: Ground truth object\n Returns\n results: dict of results in 3 settings\n '''\n def get_pred_from_scores(_scores, topk):\n '''\n Given list of scores, returns top 10 attr and obj predictions\n Check later\n '''\n _, pair_pred = _scores.topk(topk, dim = 1) #sort returns indices of k largest values\n pair_pred = pair_pred.contiguous().view(-1)\n attr_pred, obj_pred = self.pairs[pair_pred][:, 0].view(-1, topk), \\\n self.pairs[pair_pred][:, 1].view(-1, topk)\n return (attr_pred, obj_pred)\n\n results = {}\n orig_scores = scores.clone()\n mask = self.seen_mask.repeat(scores.shape[0],1) # Repeat mask along pairs dimension\n scores[~mask] += bias # Add bias to test pairs\n\n # Unbiased setting\n \n # Open world setting --no mask, all pairs of the dataset\n results.update({'open': get_pred_from_scores(scores, topk)})\n results.update({'unbiased_open': get_pred_from_scores(orig_scores, topk)})\n # Closed world setting - set the score for all Non test pairs to -1e10, \n # this excludes the pairs from set not in evaluation\n mask = self.closed_mask.repeat(scores.shape[0], 1)\n closed_scores = scores.clone()\n closed_scores[~mask] = -1e10 \n # closed_orig_scores = orig_scores.clone()\n # closed_orig_scores[~mask] 
= -1e10\n results.update({'closed': get_pred_from_scores(closed_scores, topk)})\n # results.update({'unbiased_closed': get_pred_from_scores(closed_orig_scores, topk)})\n\n # Object_oracle setting - set the score to -1e10 for all pairs where the true object does Not participate, can also use the closed score\n # mask = self.oracle_obj_mask[obj_truth]\n # oracle_obj_scores = scores.clone()\n # oracle_obj_scores[~mask] = -1e10\n # oracle_obj_scores_unbiased = orig_scores.clone()\n # oracle_obj_scores_unbiased[~mask] = -1e10\n # results.update({'object_oracle': get_pred_from_scores(oracle_obj_scores, 1)})\n # results.update({'object_oracle_unbiased': get_pred_from_scores(oracle_obj_scores_unbiased, 1)})\n results['scores'] = orig_scores\n\n return results\n\n def score_clf_model(self, scores, obj_truth, topk = 5):\n '''\n Wrapper function to call generate_predictions for CLF models\n '''\n attr_pred, obj_pred = scores\n\n # Go to CPU\n attr_pred, obj_pred, obj_truth = attr_pred.to('cpu'), obj_pred.to('cpu'), obj_truth.to('cpu')\n\n # Gather scores (P(a), P(o)) for all relevant (a,o) pairs\n # Multiply P(a) * P(o) to get P(pair)\n attr_subset = attr_pred.index_select(1, self.pairs[:,0]) # Return only attributes that are in our pairs\n obj_subset = obj_pred.index_select(1, self.pairs[:, 1])\n scores = (attr_subset * obj_subset) # (Batch, #pairs)\n\n results = self.generate_predictions(scores, obj_truth)\n results['biased_scores'] = scores\n\n return results\n\n def score_manifold_model(self, scores, obj_truth, bias = 0.0, topk = 5):\n '''\n Wrapper function to call generate_predictions for manifold models\n '''\n # Go to CPU\n # scores = {k: v.to('cpu') for k, v in scores.items()}\n obj_truth = obj_truth.to(device)\n\n # Gather scores for all relevant (a,o) pairs\n '''\n scores = torch.stack(\n [scores[(attr,obj)] for attr, obj in self.dset.pairs], 1\n ) # (Batch, #pairs)\n '''\n # orig_scores = scores.clone()\n results = self.generate_predictions(scores.clone(), obj_truth, bias, topk)\n # results['scores'] = orig_scores\n return results\n\n def score_fast_model(self, scores, obj_truth, bias = 0.0, topk = 5):\n '''\n Wrapper function to call generate_predictions for manifold models\n '''\n \n results = {}\n mask = self.seen_mask.repeat(scores.shape[0],1) # Repeat mask along pairs dimension\n scores[~mask] += bias # Add bias to test pairs\n\n mask = self.closed_mask.repeat(scores.shape[0], 1)\n closed_scores = scores.clone()\n closed_scores[~mask] = -1e10 \n\n _, pair_pred = closed_scores.topk(topk, dim = 1) #sort returns indices of k largest values\n pair_pred = pair_pred.contiguous().view(-1)\n attr_pred, obj_pred = self.pairs[pair_pred][:, 0].view(-1, topk), \\\n self.pairs[pair_pred][:, 1].view(-1, topk)\n\n results.update({'closed': (attr_pred, obj_pred)})\n return results\n\n def evaluate_predictions(self, predictions, attr_truth, obj_truth, pair_truth, allpred, topk = 1):\n # Go to CPU\n attr_truth, obj_truth, pair_truth = attr_truth.to('cpu'), obj_truth.to('cpu'), pair_truth.to('cpu')\n\n pairs = list(\n zip(list(attr_truth.numpy()), list(obj_truth.numpy())))\n \n\n seen_ind, unseen_ind = [], []\n for i in range(len(attr_truth)):\n if pairs[i] in self.train_pairs:\n seen_ind.append(i)\n else:\n unseen_ind.append(i)\n\n \n seen_ind, unseen_ind = torch.LongTensor(seen_ind), torch.LongTensor(unseen_ind)\n def _process(_scores):\n # Top k pair accuracy\n # Attribute, object and pair\n attr_match = (attr_truth.unsqueeze(1).repeat(1, topk) == _scores[0][:, :topk])\n obj_match = 
(obj_truth.unsqueeze(1).repeat(1, topk) == _scores[1][:, :topk])\n\n # Match of object pair\n match = (attr_match * obj_match).any(1).float()\n attr_match = attr_match.any(1).float()\n obj_match = obj_match.any(1).float()\n # Match of seen and unseen pairs\n seen_match = match[seen_ind]\n unseen_match = match[unseen_ind]\n ### Calculating class average accuracy\n \n # local_score_dict = copy.deepcopy(self.test_pair_dict)\n # for pair_gt, pair_pred in zip(pairs, match):\n # # print(pair_gt)\n # local_score_dict[pair_gt][2] += 1.0 #increase counter\n # if int(pair_pred) == 1:\n # local_score_dict[pair_gt][1] += 1.0\n\n # # Now we have hits and totals for classes in evaluation set\n # seen_score, unseen_score = [], []\n # for key, (idx, hits, total) in local_score_dict.items():\n # score = hits/total\n # if bool(self.seen_mask[idx]) == True:\n # seen_score.append(score)\n # else:\n # unseen_score.append(score)\n \n seen_score, unseen_score = torch.ones(512,5), torch.ones(512,5)\n\n return attr_match, obj_match, match, seen_match, unseen_match, \\\n torch.Tensor(seen_score+unseen_score), torch.Tensor(seen_score), torch.Tensor(unseen_score)\n\n def _add_to_dict(_scores, type_name, stats):\n base = ['_attr_match', '_obj_match', '_match', '_seen_match', '_unseen_match', '_ca', '_seen_ca', '_unseen_ca']\n for val, name in zip(_scores, base):\n stats[type_name + name] = val\n\n ##################### Match in places where corrent object\n # obj_oracle_match = (attr_truth == predictions['object_oracle'][0][:, 0]).float() #object is already conditioned\n # obj_oracle_match_unbiased = (attr_truth == predictions['object_oracle_unbiased'][0][:, 0]).float()\n\n # stats = dict(obj_oracle_match = obj_oracle_match, obj_oracle_match_unbiased = obj_oracle_match_unbiased)\n stats = dict()\n\n #################### Closed world\n closed_scores = _process(predictions['closed'])\n print(closed_scores[1].mean())\n # unbiased_closed = _process(predictions['unbiased_closed'])\n _add_to_dict(closed_scores, 'closed', stats)\n # _add_to_dict(unbiased_closed, 'closed_ub', stats)\n\n #################### Calculating AUC\n scores = predictions['scores']\n # getting score for each ground truth class\n correct_scores = scores[torch.arange(scores.shape[0]), pair_truth][unseen_ind]\n\n # Getting top predicted score for these unseen classes\n max_seen_scores = predictions['scores'][unseen_ind][:, self.seen_mask].topk(topk, dim=1)[0][:, topk - 1]\n\n # Getting difference between these scores\n unseen_score_diff = max_seen_scores - correct_scores\n\n # Getting matched classes at max bias for diff\n unseen_matches = stats['closed_unseen_match'].bool()\n correct_unseen_score_diff = unseen_score_diff[unseen_matches] - 1e-4\n\n # print(correct_unseen_score_diff)\n\n # sorting these diffs\n correct_unseen_score_diff = torch.sort(correct_unseen_score_diff)[0]\n magic_binsize = 20\n # getting step size for these bias values\n # print(correct_unseen_score_diff)\n bias_skip = max(len(correct_unseen_score_diff) // magic_binsize, 1)\n # Getting list\n biaslist = correct_unseen_score_diff[::bias_skip]\n \n seen_match_max = float(stats['closed_seen_match'].mean())\n unseen_match_max = float(stats['closed_unseen_match'].mean())\n\n seen_accuracy, unseen_accuracy = [], []\n\n # Go to CPU\n # base_scores = {k: v.to('cpu') for k, v in allpred.items()}\n obj_truth = obj_truth.to('cpu')\n\n # Gather scores for all relevant (a,o) pairs\n # base_scores = torch.stack(\n # [allpred[(attr,obj)] for attr, obj in self.dset.pairs], 1\n # ) # (Batch, 
#pairs)\n base_scores = allpred\n\n print(\"Start computing Biases\")\n\n for bias in biaslist:\n scores = base_scores.clone()\n results = self.score_fast_model(scores, obj_truth, bias = bias, topk = topk)\n results = results['closed'] # we only need biased\n results = _process(results)\n seen_match = float(results[3].mean())\n unseen_match = float(results[4].mean())\n # print(seen_match, unseen_match)\n # if seen_match > 0 and unseen_match > 0:\n seen_accuracy.append(seen_match)\n unseen_accuracy.append(unseen_match)\n\n # print(seen_match_max, unseen_match_max)\n # if seen_match_max > 0 and unseen_match_max > 0:\n seen_accuracy.append(seen_match_max)\n unseen_accuracy.append(unseen_match_max)\n seen_accuracy, unseen_accuracy = np.array(seen_accuracy), np.array(unseen_accuracy)\n area = np.trapz(seen_accuracy, unseen_accuracy)\n\n for key in stats:\n stats[key] = float(stats[key].mean())\n\n harmonic_mean = hmean([seen_accuracy, unseen_accuracy], axis = 0)\n max_hm = np.max(harmonic_mean)\n idx = np.argmax(harmonic_mean)\n if idx == len(biaslist):\n bias_term = 1e3\n else:\n bias_term = biaslist[idx]\n stats['biasterm'] = float(bias_term)\n stats['best_unseen'] = np.max(unseen_accuracy)\n stats['best_seen'] = np.max(seen_accuracy)\n stats['AUC'] = area\n stats['hm_unseen'] = unseen_accuracy[idx]\n stats['hm_seen'] = seen_accuracy[idx]\n stats['best_hm'] = max_hm\n return stats"
},
{
"identifier": "load_args",
"path": "utils/utils.py",
"snippet": "def load_args(filename, args):\n with open(filename, 'r') as stream:\n data_loaded = yaml.safe_load(stream)\n for key, group in data_loaded.items():\n for key, val in group.items():\n setattr(args, key, val)"
},
{
"identifier": "configure_model",
"path": "utils/config_model.py",
"snippet": "def configure_model(args, dataset):\n is_open = False\n\n if args.model == 'CAILA':\n model = CAILA(dataset, args)\n model_params = []\n prompt_params = []\n trainnable_params = ['norm', 'adapter', 'projection', 'gating_fn', 'logit_scale', 'primitive_fusion'] \n if args.learnable_prompt:\n trainnable_params.append('token_embedding')\n for name, param in model.named_parameters():\n flag = False\n for x in trainnable_params:\n if x in name:\n param.requires_grad_(True)\n model_params.append(param)\n flag = True\n break\n if flag:\n pass\n elif 'prompt' in name:\n param.requires_grad_(True)\n prompt_params.append(param)\n print(\"Prompt {}\".format(name))\n else:\n param.requires_grad_(False)\n optim_params = [{'params':model_params}, {'params':prompt_params, 'lr': args.lr}]\n optimizer = optim.Adam(optim_params, lr=args.lr, weight_decay=args.wd)\n model.is_open = is_open\n elif args.model == 'plainclip':\n model = PlainClip(dataset, args)\n model_params = []\n prompt_params = []\n trainnable_params = ['norm', 'adapter', 'projection', 'gating_fn', 'logit_scale', 'primitive_fusion'] \n if args.learnable_prompt:\n trainnable_params.append('token_embedding')\n for name, param in model.named_parameters():\n flag = False\n for x in trainnable_params:\n if x in name:\n param.requires_grad_(True)\n model_params.append(param)\n flag = True\n break\n if flag:\n pass\n elif 'prompt' in name:\n param.requires_grad_(True)\n prompt_params.append(param)\n print(\"Prompt {}\".format(name))\n else:\n param.requires_grad_(False)\n optim_params = [{'params':model_params}, {'params':prompt_params, 'lr': args.lr}]\n optimizer = optim.Adam(optim_params, lr=args.lr, weight_decay=args.wd)\n model.is_open = is_open\n optimizer = optim.Adam(optim_params, lr=args.lr, weight_decay=args.wd)\n\n model.is_open = is_open\n\n return model, optimizer"
},
{
"identifier": "parser",
"path": "flags.py",
"snippet": "DATA_FOLDER = \"./all_data\""
}
] | import torch
import torch.backends.cudnn as cudnn
import numpy as np
import tqdm
import os
from torch.utils.tensorboard import SummaryWriter
from flags import DATA_FOLDER
from tqdm import tqdm
from os.path import join as ospj
from data import dataset as dset
from models.common import Evaluator
from utils.utils import load_args
from utils.config_model import configure_model
from flags import parser | 5,098 | # Torch imports
cudnn.benchmark = True
# Python imports
# Local imports
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def main():
# Get arguments and start logging
args = parser.parse_args()
logpath = args.logpath
load_args(args.config, args)
# Get dataset
trainset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER, args.data_dir),
phase='train',
split=args.splitname,
train_only=args.train_only,
subset=args.subset,
open_world=args.open_world,
dataset=args.dataset
)
testset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER,args.data_dir),
phase='test',
split=args.splitname,
subset=args.subset,
open_world=args.open_world,
norm_family=args.norm_family,
dataset=args.dataset
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.workers)
# Get model and optimizer
model, _ = configure_model(args, trainset)
args.load = ospj(logpath,'ckpt_best_auc.t7')
checkpoint = torch.load(args.load)
model.load_state_dict(checkpoint['net'], strict=True)
model = model.cuda()
model.eval()
| # Torch imports
cudnn.benchmark = True
# Python imports
# Local imports
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def main():
# Get arguments and start logging
args = parser.parse_args()
logpath = args.logpath
load_args(args.config, args)
# Get dataset
trainset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER, args.data_dir),
phase='train',
split=args.splitname,
train_only=args.train_only,
subset=args.subset,
open_world=args.open_world,
dataset=args.dataset
)
testset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER,args.data_dir),
phase='test',
split=args.splitname,
subset=args.subset,
open_world=args.open_world,
norm_family=args.norm_family,
dataset=args.dataset
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.workers)
# Get model and optimizer
model, _ = configure_model(args, trainset)
args.load = ospj(logpath,'ckpt_best_auc.t7')
checkpoint = torch.load(args.load)
model.load_state_dict(checkpoint['net'], strict=True)
model = model.cuda()
model.eval()
| evaluator = Evaluator(testset, model) | 2 | 2023-11-01 00:54:59+00:00 | 8k |
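The Evaluator snippet in the record above sweeps a calibration bias over unseen pairs, collects seen/unseen accuracies, and reports the area under that trade-off curve plus the best harmonic mean. A minimal sketch of that final summary step (hypothetical helper name, not the dataset's code; assumes numpy/scipy and accuracy lists already gathered by the bias sweep):

```python
import numpy as np
from scipy.stats import hmean

def summarize_bias_sweep(seen_accuracy, unseen_accuracy, biaslist):
    # seen/unseen accuracies collected at each bias value (plus the max-bias point)
    seen_accuracy = np.asarray(seen_accuracy, dtype=float)
    unseen_accuracy = np.asarray(unseen_accuracy, dtype=float)
    area = np.trapz(seen_accuracy, unseen_accuracy)        # AUC over the seen/unseen trade-off
    hm = hmean([seen_accuracy, unseen_accuracy], axis=0)   # harmonic mean at each bias
    idx = int(np.argmax(hm))
    return {
        'AUC': float(area),
        'best_hm': float(hm[idx]),
        'hm_seen': float(seen_accuracy[idx]),
        'hm_unseen': float(unseen_accuracy[idx]),
        # the snippet falls back to 1e3 when the best point is the appended max-bias entry
        'biasterm': float(biaslist[idx]) if idx < len(biaslist) else 1e3,
    }
```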
fortelex/hiveline | hiveline/results/modal_shares.py | [
{
"identifier": "fptf",
"path": "hiveline/models/fptf.py",
"snippet": "def _remove_empty_keys(d):\ndef read_datetime(time_str):\ndef format_datetime(dt):\n def __init__(self, name=None, address=None, longitude=None, latitude=None, altitude=None):\n def to_dict(self):\n def to_json(self):\n def from_dict(json_str):\ndef location_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, location: Location = None, regions: list = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef station_from_json(data: dict | str | None):\n def __init__(self, id: str, station: Station, name: str, location: Location = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef stop_from_json(data: dict | str | None):\ndef place_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, stations: list[Station] = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef region_from_json(data: dict | str | None):\n def __init__(self, mode: str):\n def __str__(self):\n def __repr__(self):\n def to_string(self):\n def to_json(self):\n def from_string(mode):\n def __init__(self, id: str, name: str):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef operator_from_json(data: dict | str | None):\n def __init__(self, id: str, name: str, mode: Mode, routes: list, operator: Operator = None, sub_mode: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef line_from_json(data: dict | str | None):\n def __init__(self, id: str, line: Line, mode: Mode, stops: list[Station | Stop | Location], sub_mode: str = None):\n def to_dict(self):\n def to_json(self):\ndef route_from_json(data: dict | str | None):\n def __init__(self, arrival: int = None, departure: int = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\n def __init__(self, id: str, route: Route, mode: Mode, sequence: list[ScheduleSequenceElement], starts,\n sub_mode=None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef schedule_from_json(data: dict | str | None):\n def __init__(self, stop: Stop | Station | Location, arrival: datetime.datetime = None, arrival_delay: int = None,\n arrival_platform: str = None,\n departure: datetime.datetime = None, departure_delay: int = None, departure_platform: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef stopover_from_json(data: dict | str | None):\ndef get_location(place: Location | Station | Stop | Stopover) -> Location | None:\n def __init__(self, amount: float, currency: str):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\ndef price_from_json(data: dict | str | None):\n def __init__(self, origin: Stop | Station | Location, destination: Stop | Station | Location,\n departure: datetime.datetime, arrival: datetime.datetime, mode: Mode, sub_mode: str = None,\n departure_delay: int = None,\n departure_platform: str = None,\n arrival_delay: int = None, arrival_platform: str = None, line: Line = None, direction: str = None,\n stopovers: list[Stopover] = None, schedule: Schedule = None, public: bool = True,\n operator: Operator = None,\n price: Price = None, polyline: str = None):\n def to_dict(self):\n def to_json(self):\n def from_json(json_str):\n def get_departure(self, realtime=True):\n def get_arrival(self, realtime=True):\n def duration(self, realtime=True):\ndef leg_from_json(data: dict | str | None):\n def __init__(self, id: str, legs: list[Leg], price: Price = None):\n def to_dict(self):\n def 
to_json(self):\n def from_json(json_str):\n def get_departure(self, realtime=True):\n def get_arrival(self, realtime=True):\n def duration(self, realtime=True):\n def get_trace(self) -> list[tuple[tuple[float, float], datetime.datetime, Mode, bool]]:\ndef journey_from_json(data: dict | str | None):\ndef from_json(data: dict | str | None):\nclass Location:\nclass Station:\nclass Stop:\nclass Region:\nclass Mode(Enum):\nclass Operator:\nclass Line:\nclass Route:\nclass ScheduleSequenceElement:\nclass Schedule:\nclass Stopover:\nclass Price:\nclass Leg:\nclass Journey:\n TRAIN = 'train'\n BUS = 'bus'\n WATERCRAFT = 'watercraft'\n TAXI = 'taxi'\n GONDOLA = 'gondola'\n AIRCRAFT = 'aircraft'\n CAR = 'car'\n BICYCLE = 'bicycle'\n WALKING = 'walking'\n UNKNOWN = ''"
},
{
"identifier": "Place",
"path": "hiveline/od/place.py",
"snippet": "class Place():\n\n def __init__(self, place_name: str, year: str):\n '''\n Initialize the place object, load geographical shape and tiling\n Args:\n place_name (str): the place name (ex: 'Konstanz, Germany')\n year (str): the study year\n '''\n self.name = place_name\n self.year = year\n self.shape = ox.geocode_to_gdf(self.name)\n self.bbox = self.shape.envelope[0]\n self.get_tiles()\n self.zones = {}\n # this GeoDataFrame will store the origin destination stats\n self.data = self.tiles.copy()\n # mongo\n self.mongo_db = mongo.get_database()\n self.load_regions()\n \n def merge_places(self, new_name, place_names):\n '''\n Extend the current place with other places (reset place data)\n Args:\n new_name (str): the new place name\n place_names (list of str): the list of place names to add\n '''\n for p in place_names:\n other_place = Place(p, self.year)\n self.tiles = pd.concat([self.tiles, other_place.tiles], ignore_index=True)\n\n self.data = self.tiles.copy()\n self.name = new_name\n self.load_regions()\n\n def get_tiles(self, h3_resolution=8):\n '''\n Compute H3 tiling and select the tiles covering the place shape\n Args:\n h3_resolution (int, default=8): tiling resolution\n '''\n # Create an empty dataframe to write data into\n self.tiles = gpd.GeoDataFrame([], columns=['h3', 'geometry'])\n\n multipolygon = self.shape['geometry'][0]\n # multipolygon to list of polygons\n if multipolygon.geom_type == 'MultiPolygon':\n poly_list = [shapely.geometry.Polygon(poly.exterior.coords).__geo_interface__ for poly in multipolygon.geoms]\n elif multipolygon.geom_type == 'Polygon':\n poly_list = [multipolygon.__geo_interface__]\n else:\n raise Exception('city shape is neither a Polygon nor a MultiPolygon')\n \n for poly_geojson in poly_list:\n # Fill the dictionary with Resolution 8 H3 Hexagons\n h3_hexes = h3.polyfill_geojson(poly_geojson, h3_resolution)\n for h3_hex in h3_hexes:\n h3_geo_boundary = shapely.geometry.Polygon(\n h3.h3_to_geo_boundary(h3_hex, geo_json=True)\n )\n # Append results to dataframe\n self.tiles.loc[len(self.tiles)] = [\n h3_hex,\n h3_geo_boundary,\n ]\n # set coordinates reference system\n if self.tiles.crs == None:\n self.tiles = self.tiles.set_crs(self.shape.crs)\n\n # ensure h3 is int64\n self.tiles['h3'] = self.tiles['h3'].astype('int64')\n\n def merge_to_data(self, gdf):\n '''\n Update (or add) a new field to the data gdf\n Args:\n gdf (GeoDataFrame): the gdf to merge, must contains an 'h3' column\n '''\n if ('geometry' in gdf.columns):\n gdf = gdf.drop(columns='geometry')\n # remove field if already existing\n for field in gdf.drop(columns='h3').columns:\n if field in self.data.columns:\n self.data = self.data.drop(columns=field)\n # merge to data gdf\n self.data = self.data.merge(gdf, on='h3', how='left')\n\n def mongo_cached(collection, match_field_list, fields, extra_transformation=lambda x:x):\n '''\n Decorator to check if data is available in mongo instead of computing it\n (acts like a cache)\n Args:\n loading_function (function): function that loads data from file, outputs a DataFrame or GeoDataFrame\n collection (str): mongo db collection to search in\n match_field_list (dict): the dataframe field name to match with the mongodb field, ex: ['nuts3', 'nuts-3']\n fields (list of str): list of fields to retrieve from mongodb\n extra_transformation (function, default is identity): transform the df coming from mongo\n '''\n # 2 wrappers are needed to pass arguments to the decorator\n def wrapper1(loading_function):\n def wrapper2(self):\n # add year prefix 
to fields to retrieve\n fields_year = [self.year+'.'+f if f not in ['_id', 'nuts-3', 'shape'] else f for f in fields] \n # search fields in mongo, only for place regions\n match_ids = self.data[match_field_list[0]].to_list()\n result_df = mongo.search(self.mongo_db, collection, match_field_list[1], match_ids, fields_year)\n # call loading function if the search result is empty or incomplete\n if result_df.empty or len(result_df.columns)<2 or len(result_df) < len(match_ids):\n print('Data not in db, computing')\n # split in chunks that can be computed in one go\n chunk_size=300\n tiles_backup = self.tiles.copy()\n data_df = pd.DataFrame()\n for i in range(0, len(self.tiles), chunk_size):\n print('chunk', int(i/chunk_size))\n self.tiles = tiles_backup[i:i+chunk_size]\n chunk_df = loading_function(self)\n data_df = pd.concat([data_df, chunk_df])\n del chunk_df\n self.tiles = tiles_backup.copy()\n else:\n print('Data found in db')\n data_df = extra_transformation(result_df)\n # merge the data to local df\n self.merge_to_data(data_df)\n return wrapper2\n return wrapper1\n \n @mongo_cached(collection='tiles', match_field_list=['nuts3', 'nuts-3'], fields=['population'], extra_transformation=mongo.transform_from_mongo_extract_year)\n def load_population(self, median_imputation=True, gpkg_path=data_folder+'population_density/kontur_population_20231101.gpkg'):\n '''\n Load the population data in a GeoDataFrame and add it to self.data\n Args:\n median_imputation (boolean, default=True): whether or not to replace missing values with the median\n gpkg_path (str, default): the path to the gpkg data\n '''\n population_gdf = gpd.read_file(gpkg_path, bbox=self.shape)\n # string_to_h3 needed for h3.api.numpy_int (faster)\n population_gdf['h3'] = population_gdf['h3'].apply(h3.string_to_h3)\n\n # ensure h3 is int64\n population_gdf['h3'] = population_gdf['h3'].astype('int64')\n\n population_gdf = population_gdf[population_gdf['h3'].isin(\n self.tiles['h3'])]\n population_gdf = population_gdf.to_crs(self.shape.crs)\n\n # median imputation for missing values\n if median_imputation:\n no_data = self.tiles[~self.tiles['h3'].isin(\n population_gdf['h3'])].copy()\n no_data['population'] = population_gdf['population'].median()\n\n population_gdf = pd.concat([population_gdf, no_data])\n\n return population_gdf\n\n def plot_population(self):\n '''\n Plot the shape and the population density overlay\n '''\n if not 'population' in self.data.columns:\n print('loading population data')\n self.load_population()\n ax = self.shape.plot(color='white')\n ax.set_axis_off()\n self.data.plot(ax=ax, zorder=1, column='population')\n\n def get_zoning(self, multipolygon):\n '''\n Get zoning data from Open Street Map\n '''\n self.zones = {\n 'work_agricultural': ox.features_from_polygon(multipolygon, work_agricultural_tags),\n 'work_industrial': ox.features_from_polygon(multipolygon, work_industrial_tags),\n 'work_commercial': ox.features_from_polygon(multipolygon, work_commercial_tags),\n 'work_office': ox.features_from_polygon(multipolygon, work_office_tags),\n 'work_social': ox.features_from_polygon(multipolygon, work_social_tags),\n 'education': ox.features_from_polygon(multipolygon, education_tags),\n 'leisure': ox.features_from_polygon(multipolygon, leisure_tags),\n 'empty': ox.features_from_polygon(multipolygon, empty_tags),\n }\n\n # keep only points for office as the polygons are badly distributed\n self.zones['work_office'] = only_geo_points(self.zones['work_office'])\n\n # keep only polygons for buildings and industrial 
landuse due to significant overlap between points and buildings\n self.zones['work_industrial'] = only_geo_polygons(self.zones['work_industrial'])\n\n def get_zoning_noparkingland(self, multipolygon):\n '''\n Get zoning data from Open Street Map for no parking land\n '''\n self.zones['no_parking_land'] = ox.features_from_polygon(multipolygon, parking_tags)\n # keep only polygons for buildings and industrial landuse due to significant overlap between points and buildings\n self.zones['no_parking_land'] = only_geo_polygons(self.zones['no_parking_land'])\n \n def get_zoning_buildings(self, multipolygon, batch_nb):\n '''\n Get zoning data from Open Street Map for buildings\n Args:\n batch_nb (str): '1' or '2', the batch to get\n '''\n self.zones['buildings'+batch_nb] = ox.features_from_polygon(multipolygon, building_tags[batch_nb])\n # keep only polygons for buildings and industrial landuse due to significant overlap between points and buildings\n self.zones['buildings'+batch_nb] = only_geo_polygons(self.zones['buildings'+batch_nb])\n\n\n @mongo_cached(collection='tiles', match_field_list=['nuts3', 'nuts-3'], fields=['education', 'leisure', 'empty', 'work', 'building_density'], extra_transformation=mongo.transform_tiles_from_mongo)\n def load_zoning_data(self):\n '''\n Load the zoning data into the data gdf\n Measure the areas of zones of interest (work, education, leisure,...) within each tile\n '''\n # get all the tiles (in current chunk) geometries to a single multipolygon\n multipolygon = shapely.geometry.MultiPolygon(self.tiles['geometry'].to_list())\n # merge intersecting polygons\n multipolygon = multipolygon.buffer(0)\n self.get_zoning(multipolygon)\n self.get_zoning_noparkingland(multipolygon)\n self.get_zoning_buildings(multipolygon, '1')\n self.get_zoning_buildings(multipolygon, '2')\n destination = self.tiles.copy()\n\n # area of a whole single hexagonal tile\n tile_area = self.tiles.to_crs(epsg=6933).head(1)['geometry'].area.item()\n\n for i, tile in destination.iterrows():\n for interest in self.zones.keys():\n # clip zones by hex tile\n if not self.zones[interest].empty:\n local_zoi = gpd.clip(self.zones[interest], tile['geometry']).copy() # zoi = zones of interest\n else:\n local_zoi = gpd.GeoDataFrame()\n # compute interest area in tile\n area = 0\n nb_points = 0\n if len(local_zoi) != 0:\n # replace single points with a defined area\n nb_points = len(only_geo_points(local_zoi))\n area = local_zoi.to_crs(epsg=6933).area.sum()\n destination.loc[i, interest] = area + nb_points * point_area\n # default work rate for non empty area, disabled for now\n # if interest == 'empty':\n # destination.loc[i, 'work'] += (tile_area-area) * \\\n # default_work_coefficient\n\n # combine all work zones into one\n work_zones = [k for k in self.zones.keys() if 'work' in k]\n destination['work'] = destination[work_zones].sum(axis=1)\n # calculate building density for parking\n destination['building_density'] = (destination['buildings1'] + destination['buildings2'] + destination['no_parking_land']) / tile_area\n destination = destination.drop(columns=['buildings1', 'buildings2', 'no_parking_land'])\n return destination\n \n @mongo_cached(collection='tiles', match_field_list=['nuts3', 'nuts-3'], fields=['parking'], extra_transformation=mongo.transform_tiles_from_mongo)\n def load_parking_data(self):\n '''\n Approximate parking probabilities based on building density and input variables \n '''\n tiles_filter = self.data['h3'].isin(self.tiles['h3'])\n destination = self.data.loc[tiles_filter, ['h3', 
'building_density']].copy()\n \n # get global parking variables\n prkg_locations = parking_prob.keys()\n prkg_vehicles = parking_prob['destination'].keys()\n\n # calculate parking probabilities for each tile\n for i, tile in destination.iterrows():\n dsty = tile['building_density']\n for p in prkg_locations:\n for v in prkg_vehicles:\n min_prob_bldg_dsty = parking_prob[p][v]['min_prob_bldg_dsty']\n min_prob = parking_prob[p][v]['min_prob']\n max_prob_bldg_dsty = parking_prob[p][v]['max_prob_bldg_dsty']\n max_prob = parking_prob[p][v]['max_prob']\n if dsty >= min_prob_bldg_dsty:\n prob = min_prob\n elif dsty <= max_prob_bldg_dsty:\n prob = max_prob\n else: # min_prob_bldg_dsty > dsty > max_prob_bldg_dsty\n prob = np.round( max_prob - (max_prob - min_prob) * (dsty - max_prob_bldg_dsty)/(min_prob_bldg_dsty - max_prob_bldg_dsty), 4)\n # add columns to destination dataframe\n destination.loc[i,f'parking_{p}_{v}'] = prob\n destination = destination.drop(columns='building_density') # already in the data\n return destination\n\n @mongo_cached(collection='tiles', match_field_list=['h3', '_id'], fields=['nuts-3'])\n def load_regions(self, nuts_file=data_folder+'nuts/NUTS_RG_01M_2021_4326.geojson'):\n '''\n Get the region of each tile (NUTS 3), and load it to the data\n Args:\n nuts_file (str, default): the geojson file containing the official NUTS European regions\n '''\n nuts = gpd.read_file(nuts_file)\n # keep only the most precise level as it contains the other\n nuts3 = nuts[nuts['LEVL_CODE'] == 3][['id', 'geometry']].reset_index(drop=True)\n del nuts\n # nuts regions that intersects with the city (not overlaps)\n place_regions = nuts3.loc[nuts3.intersects(self.shape['geometry'][0]), ['id', 'geometry']]\n place_regions = place_regions.reset_index(drop=True)\n # due to precision differences, the city is overlapping with several regions instead of one\n # regions are defined according to cities boundaries so there should be one region assigned to a city\n # however, a tiled place can span across different regions.\n regions = self.tiles.copy()\n regions['nuts3'] = ''\n # for each tile, compute the intersection area with the regions and keep the largest\n for i, tile in regions.iterrows():\n # check if it intersects before computing the intersection area (otherwise there is a warning)\n intersect = place_regions.intersects(tile['geometry'])\n best_matching_index = place_regions[intersect].intersection(tile['geometry']).to_crs(epsg=6933).area\n if best_matching_index.empty:\n best_matching_index = 0\n else:\n best_matching_index = best_matching_index.argmax()\n regions.loc[i, 'nuts3'] = place_regions.iloc[best_matching_index]['id']\n\n return regions\n \n def load_all(self):\n '''\n Load all the data\n '''\n self.load_population()\n self.load_zoning_data()\n self.load_parking_data()\n\n self.export_place_to_mongo()\n self.export_tiles_to_mongo()\n\n def plot_zoning(self, columns=['population', 'work', 'education', 'leisure'], save_name='filename'):\n '''\n Plot one or several zoning data\n Args:\n columns (list of str): list of columns to plot\n save (str): name of the file to save, the path and city name is automatically added\n '''\n assert len(columns) > 0, 'At least one column is required.'\n for c in columns:\n assert c in self.data.columns, f'The column {c} does not exists in the loaded data.'\n\n nfig = len(columns)\n ncols = (nfig+1)//2\n nrows = 1 if nfig == 1 else 2\n figsize = (3.5*ncols, 3.5*nrows)\n\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)\n for i, c in 
enumerate(columns):\n if nfig == 1:\n ax = axes\n elif nfig == 2:\n ax = axes[i % 2]\n else:\n ax = axes[i % 2, i//2]\n # add city boundaries\n self.shape.boundary.plot(ax=ax)\n # add column data\n self.data.plot(ax=ax, column=c, colormap='magma')\n ax.set_title(c)\n ax.set_axis_off()\n\n # don't show axis for last subplot\n if nfig > 1 and nfig % 2 == 1:\n axes[1, ncols-1].set_axis_off()\n\n # Display the subplots\n fig.suptitle(self.name)\n if save_name:\n city_name = self.name.split(',')[0]\n plt.savefig(\n data_folder+f'visualization/zoning/{save_name}_{city_name}.png', dpi=300)\n plt.show()\n\n def export_place_to_mongo(self):\n '''\n Push the place data to mongodb\n '''\n n = self.name.split(', ')\n data = {\n 'name': n[0],\n 'country': n[1],\n 'shape': str(self.shape['geometry'][0]),\n 'bbox': str(self.bbox),\n 'tiles': self.tiles['h3'].to_list(),\n 'nuts-3': self.data['nuts3'].unique().tolist(),\n }\n self.mongo_db['places'].update_one({'name': data['name'], 'country': data['country']}, {'$set': data}, upsert=True)\n\n def export_tiles_to_mongo(self):\n '''\n Push the tiles and zoning data to mongodb\n '''\n id_df = self.data[['h3', 'nuts3', 'geometry']].copy()\n id_df['geometry'] = id_df['geometry'].astype(str)\n id_df = id_df.rename(columns={'h3': '_id', 'nuts3': 'nuts-3', 'geometry':'shape'})\n id_array = mongo.df_to_dict(id_df)\n data_df = self.data[['population', 'education', 'leisure', 'empty']].copy()\n data_array = mongo.df_to_dict(data_df)\n for prefix in ['work', 'parking']:\n prefix_df = self.data[[c for c in self.data.columns if prefix in c]].copy()\n if prefix=='work':\n prefix_df = prefix_df.rename(columns={prefix:'total'})\n prefix_array = mongo.df_to_dict(prefix_df)\n # remove prefix\n prefix_array = [{k.replace(prefix+'_', ''):v for k,v in d.items()} for d in prefix_array]\n # merge work with other data\n [d.update({prefix: prefix_array[i]}) for i, d in enumerate(data_array)]\n # add ids and year\n data_array_export = []\n for i, d in enumerate(data_array):\n ids = id_array[i]\n ids.update({self.year: d})\n data_array_export.append(ids)\n # push\n mongo.push_to_collection(self.mongo_db, 'tiles', data_array_export)"
},
{
"identifier": "Journeys",
"path": "hiveline/results/journeys.py",
"snippet": "class Journeys:\nclass JourneyStats:\n def __init__(self, sim_id: str, db=None, use_cache=True, cache=\"./cache\"):\n def __find_all(self):\n def iterate(self) -> Generator[Options, None, None]:\n def iterate_selection(self, selection: list[str | None]) -> Generator[Option, None, None]:\n def iterate_traces(self, selection=None) -> Generator[list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]], None, None]:\n def get_selection(self, decision: Callable[[Options], Option | None], max_count=None) -> list[str | None]:\n def __load_cache(self):\n def __save_cache(self, options: list[Options]):\n def prepare_traces(self):\n def __init__(self):\n def to_dict(self):\n def get_all_modal_shares(self):\n def get_transit_modal_share(self):\ndef __approx_dist(origin: tuple[float, float], destination: tuple[float, float]):\ndef get_option_stats(option: Option, shape: Polygon | None = None) -> JourneyStats:\ndef filter_trace(trace: list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]], polygon: Polygon):\ndef get_trace_stats(trace: list[tuple[tuple[float, float], datetime.datetime, fptf.Mode, bool]]) -> JourneyStats:\ndef __approx_dist_fptf(origin: fptf.Location, destination: fptf.Location):\ndef __get_distance(leg: fptf.Leg) -> float:\ndef get_journey_stats(journey: fptf.Journey) -> JourneyStats:"
},
{
"identifier": "ensure_directory",
"path": "hiveline/routing/util.py",
"snippet": "def ensure_directory(path):\n \"\"\"\n Ensures that the given directory exists. If it does not exist, it will be created.\n :param path: The path to the directory\n :return:\n \"\"\"\n if not os.path.isdir(path):\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)"
}
] | import datetime
import random
import uuid
import osmnx
import hiveline.vc.vc_extract as vc_extract
from matplotlib import pyplot as plt
from hiveline.models import fptf
from hiveline.od.place import Place
from hiveline.results.journeys import Journeys, Option, Options, get_option_stats, JourneyStats
from hiveline.routing.util import ensure_directory | 6,852 |
rail_modes = [fptf.Mode.TRAIN, fptf.Mode.BUS, fptf.Mode.GONDOLA, fptf.Mode.WATERCRAFT]
class Params:
"""
Simulation parameters for congestion and modal share analysis
"""
num_citizens = 2000000
vehicle_factor = 0.00007
vcs_car_usage_start = 0.5
mix_factor = 0.1
max_iterations = 100
car_ownership_override = 0 # probability that a vc will own a car even though they don't have one. all of these would use it as well.
car_usage_override = 0 # probability that a car owner would choose a car even though there is no parking
|
rail_modes = [fptf.Mode.TRAIN, fptf.Mode.BUS, fptf.Mode.GONDOLA, fptf.Mode.WATERCRAFT]
class Params:
"""
Simulation parameters for congestion and modal share analysis
"""
num_citizens = 2000000
vehicle_factor = 0.00007
vcs_car_usage_start = 0.5
mix_factor = 0.1
max_iterations = 100
car_ownership_override = 0 # probability that a vc will own a car even though they don't have one. all of these would use it as well.
car_usage_override = 0 # probability that a car owner would choose a car even though there is no parking
| def decide(options: Options, params: Params = None) -> Option | None: | 2 | 2023-11-07 15:34:04+00:00 | 8k |
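The modal_shares.py record above builds on the fptf journey model shown in its context (legs carrying a mode plus departure/arrival datetimes). A rough illustrative helper, not part of hiveline, showing how per-mode duration shares could be derived from a single journey under those assumptions:

```python
from collections import defaultdict

def journey_mode_shares(journey) -> dict:
    # journey is assumed to be an fptf.Journey with .legs, each leg exposing
    # .mode, .get_departure() and .get_arrival() as in the snippet above
    totals = defaultdict(float)
    for leg in journey.legs:
        dep, arr = leg.get_departure(), leg.get_arrival()
        if dep is None or arr is None:
            continue
        totals[leg.mode] += (arr - dep).total_seconds()
    grand_total = sum(totals.values()) or 1.0
    return {mode: seconds / grand_total for mode, seconds in totals.items()}
```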
uhppoted/uhppoted-app-home-assistant | custom_components/uhppoted/sensor.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/uhppoted/const.py",
"snippet": "DOMAIN = 'uhppoted'"
},
{
"identifier": "CONF_BIND_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BIND_ADDR = 'bind_address'"
},
{
"identifier": "CONF_BROADCAST_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BROADCAST_ADDR = 'broadcast_address'"
},
{
"identifier": "CONF_LISTEN_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_LISTEN_ADDR = 'listen_address'"
},
{
"identifier": "CONF_DEBUG",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_DEBUG = 'debug'"
},
{
"identifier": "ATTR_ADDRESS",
"path": "custom_components/uhppoted/const.py",
"snippet": "ATTR_ADDRESS = 'address'"
},
{
"identifier": "ATTR_NETMASK",
"path": "custom_components/uhppoted/const.py",
"snippet": "ATTR_NETMASK = 'netmask'"
},
{
"identifier": "ATTR_GATEWAY",
"path": "custom_components/uhppoted/const.py",
"snippet": "ATTR_GATEWAY = 'gateway'"
},
{
"identifier": "ATTR_FIRMWARE",
"path": "custom_components/uhppoted/const.py",
"snippet": "ATTR_FIRMWARE = 'firmware'"
},
{
"identifier": "configure_controllers",
"path": "custom_components/uhppoted/config.py",
"snippet": "def configure_controllers(options, f):\n if CONF_CONTROLLERS in options:\n controllers = options[CONF_CONTROLLERS]\n\n for c in controllers:\n controller = f'{c[CONF_CONTROLLER_ID]}'.strip()\n serial_no = f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}'.strip()\n address = f'{c[CONF_CONTROLLER_ADDR]}'.strip()\n\n f(controller, serial_no, address)"
},
{
"identifier": "configure_doors",
"path": "custom_components/uhppoted/config.py",
"snippet": "def configure_doors(options, g):\n if CONF_CONTROLLERS in options and CONF_DOORS in options:\n controllers = options[CONF_CONTROLLERS]\n doors = options[CONF_DOORS]\n\n for c in controllers:\n controller = f'{c[CONF_CONTROLLER_ID]}'.strip()\n serial_no = f'{c[CONF_CONTROLLER_SERIAL_NUMBER]}'.strip()\n address = f'{c[CONF_CONTROLLER_ADDR]}'.strip()\n\n for d in doors:\n door = f'{d[CONF_DOOR_ID]}'.strip()\n door_no = f'{d[CONF_DOOR_NUMBER]}'.strip()\n door_controller = f'{d[CONF_DOOR_CONTROLLER]}'.strip()\n\n if door_controller == controller:\n g(controller, serial_no, door, door_no)"
},
{
"identifier": "configure_cards",
"path": "custom_components/uhppoted/config.py",
"snippet": "def configure_cards(options, f):\n if CONF_CARDS in options:\n cards = options[CONF_CARDS]\n for c in cards:\n card = f'{c[CONF_CARD_NUMBER]}'.strip()\n name = f'{c[CONF_CARD_NAME]}'.strip()\n unique_id = f'{c[CONF_CARD_UNIQUE_ID]}'.strip()\n\n f(card, name, unique_id)"
},
{
"identifier": "configure_driver",
"path": "custom_components/uhppoted/config.py",
"snippet": "def configure_driver(options):\n bind = options[CONF_BIND_ADDR]\n broadcast = options[CONF_BROADCAST_ADDR]\n listen = options[CONF_LISTEN_ADDR]\n debug = options[CONF_DEBUG]\n\n if CONF_CONTROLLERS in options:\n controllers = [int(f'{v[CONF_CONTROLLER_SERIAL_NUMBER]}') for v in options[CONF_CONTROLLERS]]\n else:\n controllers = []\n\n return {\n 'api': uhppote.Uhppote(bind, broadcast, listen, debug),\n 'controllers': controllers,\n }"
},
{
"identifier": "ControllerInfo",
"path": "custom_components/uhppoted/controller.py",
"snippet": "class ControllerInfo(SensorEntity):\n _attr_icon = 'mdi:identifier'\n _attr_has_entity_name: True\n _attr_translation_key = 'controller_id'\n\n def __init__(self, u, controller, serial_no):\n super().__init__()\n\n _LOGGER.debug(f'controller {controller} {serial_no}')\n\n self.uhppote = u\n self.controller = controller\n self.serial_no = int(f'{serial_no}')\n self._name = f'uhppoted.controller.{controller}.info'.lower()\n self._state = None\n self._attributes: Dict[str, Any] = {\n ATTR_ADDRESS: '',\n ATTR_NETMASK: '',\n ATTR_GATEWAY: '',\n ATTR_FIRMWARE: '',\n }\n self._available = False\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.controller.{self.controller}.info'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def state(self) -> Optional[str]:\n if self._state != None:\n return f'{self._state}'\n\n return None\n\n @property\n def extra_state_attributes(self) -> Dict[str, Any]:\n return self._attributes\n\n async def async_update(self):\n _LOGGER.debug(f'controller:{self.controller} update info')\n try:\n response = self.uhppote.get_controller(self.serial_no)\n\n if response.controller == self.serial_no:\n self._state = response.controller\n self._available = True\n self._attributes[ATTR_ADDRESS] = f'{response.ip_address}'\n self._attributes[ATTR_NETMASK] = f'{response.subnet_mask}'\n self._attributes[ATTR_GATEWAY] = f'{response.gateway}'\n self._attributes[ATTR_FIRMWARE] = f'{response.version} {response.date:%Y-%m-%d}'\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} information')"
},
{
"identifier": "ControllerDoor",
"path": "custom_components/uhppoted/door.py",
"snippet": "class ControllerDoor(SensorEntity):\n _attr_icon = 'mdi:door'\n _attr_has_entity_name: True\n\n def __init__(self, u, controller, serial_no, door, door_id):\n super().__init__()\n\n _LOGGER.debug(f'controller {controller}: door:{door}')\n\n self.uhppote = u\n self.controller = controller\n self.serial_no = int(f'{serial_no}')\n self.door = door\n self.door_id = int(f'{door_id}')\n\n self._name = f'uhppoted.door.{door}'.lower()\n self._unlocked = None\n self._open = None\n self._button = None\n self._available = False\n\n self._attributes: Dict[str, Any] = {\n ATTR_DOOR_CONTROLLER: f'{serial_no}',\n ATTR_DOOR_NUMBER: f'{door_id}',\n }\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.door.{self.door}'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def state(self) -> Optional[str]:\n if self._available:\n s = []\n if self._button == True:\n s.append('PRESSED')\n\n if self._unlocked == False:\n s.append('LOCKED')\n elif self._unlocked == True:\n s.append('UNLOCKED')\n\n if self._open == False:\n s.append('CLOSED')\n elif self._open == True:\n s.append('OPEN')\n\n return ' '.join(s)\n\n return None\n\n @property\n def extra_state_attributes(self) -> Dict[str, Any]:\n return self._attributes\n\n async def async_update(self):\n _LOGGER.debug(f'controller:{self.controller} update door {self.door} state')\n try:\n response = self.uhppote.get_status(self.serial_no)\n\n if response.controller == self.serial_no:\n if self.door_id == 1:\n self._open = response.door_1_open == True\n self._button = response.door_1_button == True\n self._unlocked = response.relays & 0x01 == 0x01\n elif self.door_id == 2:\n self._open = response.door_2_open == True\n self._button = response.door_2_button == True\n self._unlocked = response.relays & 0x02 == 0x02\n elif self.door_id == 3:\n self._open = response.door_3_open == True\n self._button = response.door_3_button == True\n self._unlocked = response.relays & 0x04 == 0x04\n elif self.door_id == 4:\n self._open = response.door_4_open == True\n self._button = response.door_4_button == True\n self._unlocked = response.relays & 0x08 == 0x08\n else:\n self._open = None\n self._button = None\n self._unlocked = None\n\n self._available = True\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} status')"
},
{
"identifier": "ControllerDoorOpen",
"path": "custom_components/uhppoted/door.py",
"snippet": "class ControllerDoorOpen(SensorEntity):\n _attr_icon = 'mdi:door'\n _attr_has_entity_name: True\n\n def __init__(self, u, controller, serial_no, door, door_id):\n super().__init__()\n\n _LOGGER.debug(f'controller {controller}: door:{door} open')\n\n self.uhppote = u\n self.controller = controller\n self.serial_no = int(f'{serial_no}')\n self.door = door\n self.door_id = int(f'{door_id}')\n\n self._name = f'uhppoted.door.{door}.open'.lower()\n self._open = None\n self._available = False\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.door.{self.door}.open'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def state(self) -> Optional[str]:\n if self._available:\n if self._open == False:\n return 'CLOSED'\n elif self._open == True:\n return 'OPEN'\n\n return None\n\n async def async_update(self):\n _LOGGER.debug(f'controller:{self.controller} update door {self.door}.open state')\n try:\n response = self.uhppote.get_status(self.serial_no)\n\n if response.controller == self.serial_no:\n if self.door_id == 1:\n self._open = response.door_1_open == True\n elif self.door_id == 2:\n self._open = response.door_2_open == True\n elif self.door_id == 3:\n self._open = response.door_3_open == True\n elif self.door_id == 4:\n self._open = response.door_4_open == True\n else:\n self._open = None\n\n self._available = True\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} status')"
},
{
"identifier": "ControllerDoorLock",
"path": "custom_components/uhppoted/door.py",
"snippet": "class ControllerDoorLock(SensorEntity):\n _attr_icon = 'mdi:door'\n _attr_has_entity_name: True\n\n def __init__(self, u, controller, serial_no, door, door_id):\n super().__init__()\n\n _LOGGER.debug(f'controller {controller}: door:{door} lock')\n\n self.uhppote = u\n self.controller = controller\n self.serial_no = int(f'{serial_no}')\n self.door = door\n self.door_id = int(f'{door_id}')\n\n self._name = f'uhppoted.door.{door}.lock'.lower()\n self._unlocked = None\n self._available = False\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.door.{self.door}.lock'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def state(self) -> Optional[str]:\n if self._available:\n if self._unlocked == False:\n return 'LOCKED'\n elif self._unlocked == True:\n return 'UNLOCKED'\n\n return None\n\n async def async_update(self):\n _LOGGER.debug(f'controller:{self.controller} update door {self.door}.lock state')\n try:\n response = self.uhppote.get_status(self.serial_no)\n\n if response.controller == self.serial_no:\n if self.door_id == 1:\n self._unlocked = response.relays & 0x01 == 0x01\n elif self.door_id == 2:\n self._unlocked = response.relays & 0x02 == 0x02\n elif self.door_id == 3:\n self._unlocked = response.relays & 0x04 == 0x04\n elif self.door_id == 4:\n self._unlocked = response.relays & 0x08 == 0x08\n else:\n self._unlocked = None\n\n self._available = True\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} status')"
},
{
"identifier": "ControllerDoorButton",
"path": "custom_components/uhppoted/door.py",
"snippet": "class ControllerDoorButton(SensorEntity):\n _attr_icon = 'mdi:door'\n _attr_has_entity_name: True\n\n def __init__(self, u, controller, serial_no, door, door_id):\n super().__init__()\n\n _LOGGER.debug(f'controller {controller}: door:{door} button')\n\n self.uhppote = u\n self.controller = controller\n self.serial_no = int(f'{serial_no}')\n self.door = door\n self.door_id = int(f'{door_id}')\n\n self._name = f'uhppoted.door.{door}.button'.lower()\n self._pressed = None\n self._available = False\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.door.{self.door}.button'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def state(self) -> Optional[str]:\n if self._available:\n if self._pressed == True:\n return 'PRESSED'\n elif self._pressed == False:\n return 'RELEASED'\n\n return None\n\n async def async_update(self):\n _LOGGER.debug(f'controller:{self.controller} update door {self.door} button state')\n try:\n response = self.uhppote.get_status(self.serial_no)\n\n if response.controller == self.serial_no:\n if self.door_id == 1:\n self._pressed = response.door_1_button == True\n elif self.door_id == 2:\n self._pressed = response.door_2_button == True\n elif self.door_id == 3:\n self._pressed = response.door_3_button == True\n elif self.door_id == 4:\n self._pressed = response.door_4_button == True\n else:\n self._pressed = None\n\n self._available = True\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} status')"
},
{
"identifier": "ControllerDoorMode",
"path": "custom_components/uhppoted/door.py",
"snippet": "class ControllerDoorMode(SelectEntity):\n _attr_icon = 'mdi:door'\n _attr_has_entity_name: True\n\n def __init__(self, u, controller, serial_no, door, door_id):\n super().__init__()\n\n _LOGGER.debug(f'controller {controller}: door:{door} mode')\n\n self.uhppote = u\n self.controller = controller\n self.serial_no = int(f'{serial_no}')\n self.door = door\n self.door_id = int(f'{door_id}')\n\n self._name = f'uhppoted.door.{door}.mode'.lower()\n self._mode = None\n self._available = False\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.door.{self.door}.mode'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def options(self):\n return ['CONTROLLED', 'LOCKED', 'UNLOCKED']\n\n @property\n def current_option(self) -> Optional[str]:\n if self._available:\n if self._mode == 1:\n return 'UNLOCKED'\n elif self._mode == 2:\n return 'LOCKED'\n elif self._mode == 3:\n return 'CONTROLLED'\n else:\n return 'UNKNOWN'\n\n return None\n\n async def async_select_option(self, option):\n if option == 'UNLOCKED':\n self._mode = 1\n elif option == 'LOCKED':\n self._mode = 2\n elif option == 'CONTROLLED':\n self._mode = 3\n\n try:\n response = self.uhppote.get_door_control(self.serial_no, self.door_id)\n if response.controller == self.serial_no and response.door == self.door_id:\n mode = self._mode\n delay = response.delay\n response = self.uhppote.set_door_control(self.serial_no, self.door_id, mode, delay)\n\n if response.controller == self.serial_no and response.door == self.door_id:\n _LOGGER.info(f'set door {self.door} mode ({option})')\n self._mode = response.mode\n self._available = True\n else:\n raise ValueError(f'failed to set controller {self.controller} door {self.door} mode')\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} door {self.door} mode')\n\n async def async_update(self):\n _LOGGER.debug(f'controller:{self.controller} update door {self.door} mode')\n try:\n response = self.uhppote.get_door_control(self.serial_no, self.door_id)\n\n if response.controller == self.serial_no and response.door == self.door_id:\n self._mode = response.mode\n self._available = True\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving controller {self.controller} door {self.door} mode')"
},
{
"identifier": "CardInfo",
"path": "custom_components/uhppoted/card.py",
"snippet": "class CardInfo(SensorEntity):\n _attr_icon = 'mdi:card-account-details'\n _attr_has_entity_name: True\n\n def __init__(self, u, card, name, unique_id):\n super().__init__()\n\n _LOGGER.debug(f'card {card}')\n\n self.driver = u\n self.card = int(f'{card}')\n\n self._unique_id = unique_id\n self._name = f'uhppoted.card.{card}.info'.lower()\n self._cardholder = name\n self._start_date = None\n self._end_date = None\n self._permissions = None\n self._available = False\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.card.{self._unique_id}.info'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def state(self) -> Optional[str]:\n if self._available:\n today = date.today()\n state = []\n\n if self._cardholder.strip() != '':\n state.append(self._cardholder)\n\n if self._start_date and self._start_date <= today and self._end_date and self._end_date >= today:\n state.append('VALID')\n elif self._start_date and self._start_date > today:\n state.append('NOT VALID')\n elif self._end_date and self._end_date < today:\n state.append('EXPIRED')\n\n if self._permissions and len(self._permissions) < 1:\n state.append('NO ACCESS')\n\n return ', '.join(state)\n\n return None\n\n @property\n def extra_state_attributes(self) -> Dict[str, Any]:\n permissions = f\"','.join(self._permissions)\" if self._permissions else None\n return {\n ATTR_CARD_HOLDER: self._cardholder,\n ATTR_CARD_STARTDATE: self._start_date,\n ATTR_CARD_ENDDATE: self._end_date,\n ATTR_CARD_PERMISSIONS: permissions,\n }\n\n async def async_update(self):\n _LOGGER.debug(f'card:{self.card} state')\n try:\n start_date = None\n end_date = None\n for controller in self.driver['controllers']:\n response = self.driver['api'].get_card(controller, self.card)\n\n if response.controller == controller and response.card_number == self.card:\n if not start_date or response.start_date < start_date:\n start_date = response.start_date\n\n if not end_date or response.end_date > end_date:\n end_date = response.end_date\n\n self._start_date = start_date\n self._end_date = end_date\n self._available = True\n\n except (Exception):\n self._available = False\n _LOGGER.exception(f'error retrieving card {self.card} state')"
},
{
"identifier": "CardHolder",
"path": "custom_components/uhppoted/card.py",
"snippet": "class CardHolder(SensorEntity):\n _attr_icon = 'mdi:card-account-details'\n _attr_has_entity_name: True\n\n def __init__(self, u, card, name, unique_id):\n super().__init__()\n\n _LOGGER.debug(f'card {card}')\n\n self.driver = u\n self.card = int(f'{card}')\n\n self._unique_id = unique_id\n self._name = f'uhppoted.card.{card}.cardholder'.lower()\n self._cardholder = name\n self._available = True\n self._attributes: Dict[str, Any] = {}\n\n @property\n def unique_id(self) -> str:\n return f'uhppoted.card.{self._unique_id}.cardholder'.lower()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def available(self) -> bool:\n return self._available\n\n @property\n def state(self) -> Optional[str]:\n if self._available:\n return self._cardholder\n\n return None\n\n @property\n def extra_state_attributes(self) -> Dict[str, Any]:\n return self._attributes\n\n async def async_update(self):\n _LOGGER.debug(f'card:{self.card} cardholder')\n self._available = True"
}
] | import datetime
import logging
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from uhppoted import uhppote
from .const import DOMAIN
from .const import CONF_BIND_ADDR
from .const import CONF_BROADCAST_ADDR
from .const import CONF_LISTEN_ADDR
from .const import CONF_DEBUG
from .const import ATTR_ADDRESS
from .const import ATTR_NETMASK
from .const import ATTR_GATEWAY
from .const import ATTR_FIRMWARE
from .config import configure_controllers
from .config import configure_doors
from .config import configure_cards
from .config import configure_driver
from .controller import ControllerInfo
from .door import ControllerDoor
from .door import ControllerDoorOpen
from .door import ControllerDoorLock
from .door import ControllerDoorButton
from .door import ControllerDoorMode
from .card import CardInfo
from .card import CardHolder | 6,159 | from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
config = entry.data
options = entry.options
u = configure_driver(options)
entities = []
def f(controller, serial_no, address):
entities.extend([
ControllerInfo(u['api'], controller, serial_no),
])
def g(controller, serial_no, door, door_no):
entities.extend([
ControllerDoor(u['api'], controller, serial_no, door, door_no),
ControllerDoorOpen(u['api'], controller, serial_no, door, door_no),
ControllerDoorLock(u['api'], controller, serial_no, door, door_no),
ControllerDoorButton(u['api'], controller, serial_no, door, door_no),
])
def h(card, name, unique_id):
entities.extend([
CardInfo(u, card, name, unique_id),
CardHolder(u, card, name, unique_id),
])
configure_controllers(options, f)
configure_doors(options, g)
| from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
config = entry.data
options = entry.options
u = configure_driver(options)
entities = []
def f(controller, serial_no, address):
entities.extend([
ControllerInfo(u['api'], controller, serial_no),
])
def g(controller, serial_no, door, door_no):
entities.extend([
ControllerDoor(u['api'], controller, serial_no, door, door_no),
ControllerDoorOpen(u['api'], controller, serial_no, door, door_no),
ControllerDoorLock(u['api'], controller, serial_no, door, door_no),
ControllerDoorButton(u['api'], controller, serial_no, door, door_no),
])
def h(card, name, unique_id):
entities.extend([
CardInfo(u, card, name, unique_id),
CardHolder(u, card, name, unique_id),
])
configure_controllers(options, f)
configure_doors(options, g) | configure_cards(options, h) | 11 | 2023-11-06 18:46:49+00:00 | 8k |
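The door entities in the record above decode the controller's relay bitmask in their async_update methods, with doors 1–4 mapped to bits 0x01, 0x02, 0x04 and 0x08. A minimal sketch of that bit mapping, written here only for illustration:

```python
def door_unlocked(relays: int, door_id: int) -> bool:
    # doors 1..4 correspond to relay bits 0x01, 0x02, 0x04, 0x08
    if door_id not in (1, 2, 3, 4):
        raise ValueError('uhppote controllers expose four doors')
    return bool(relays & (1 << (door_id - 1)))

# Example: with relays == 0b0101, doors 1 and 3 report UNLOCKED, doors 2 and 4 LOCKED.
```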
shadowpa0327/FLORA | data/build.py | [
{
"identifier": "create_transform",
"path": "data/augmentation/transforms_factory.py",
"snippet": "def create_transform(\n input_size,\n is_training=False,\n use_prefetcher=False,\n no_aug=False,\n scale=None,\n ratio=None,\n hflip=0.5,\n vflip=0.,\n color_jitter=0.4,\n auto_augment=None,\n interpolation='bilinear',\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD,\n re_prob=0.,\n re_mode='const',\n re_count=1,\n re_num_splits=0,\n crop_pct=None,\n tf_preprocessing=False,\n separate=False):\n\n if isinstance(input_size, (tuple, list)):\n img_size = input_size[-2:]\n else:\n img_size = input_size\n\n if tf_preprocessing and use_prefetcher:\n assert not separate, \"Separate transforms not supported for TF preprocessing\"\n from .tf_preprocessing import TfPreprocessTransform\n transform = TfPreprocessTransform(\n is_training=is_training, size=img_size, interpolation=interpolation)\n else:\n if is_training and no_aug:\n assert not separate, \"Cannot perform split augmentation with no_aug\"\n transform = transforms_noaug_train(\n img_size,\n interpolation=interpolation,\n use_prefetcher=use_prefetcher,\n mean=mean,\n std=std)\n elif is_training:\n transform = transforms_imagenet_train(\n img_size,\n scale=scale,\n ratio=ratio,\n hflip=hflip,\n vflip=vflip,\n color_jitter=color_jitter,\n auto_augment=auto_augment,\n interpolation=interpolation,\n use_prefetcher=use_prefetcher,\n mean=mean,\n std=std,\n re_prob=re_prob,\n re_mode=re_mode,\n re_count=re_count,\n re_num_splits=re_num_splits,\n separate=separate)\n else:\n assert not separate, \"Separate transforms not supported for validation preprocessing\"\n transform = transforms_imagenet_eval(\n img_size,\n interpolation=interpolation,\n use_prefetcher=use_prefetcher,\n mean=mean,\n std=std,\n crop_pct=crop_pct)\n\n return transform"
},
{
"identifier": "Mixup",
"path": "data/augmentation/mixup.py",
"snippet": "class Mixup:\n \"\"\" Mixup/Cutmix that applies different params to each element or whole batch\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.\n cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.\n prob (float): probability of applying mixup or cutmix per batch or element\n switch_prob (float): probability of switching to cutmix instead of mixup when both are active\n mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)\n correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders\n label_smoothing (float): apply label smoothing to the mixed target tensor\n num_classes (int): number of classes for target\n \"\"\"\n\n def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,\n mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):\n self.mixup_alpha = mixup_alpha\n self.cutmix_alpha = cutmix_alpha\n self.cutmix_minmax = cutmix_minmax\n if self.cutmix_minmax is not None:\n assert len(self.cutmix_minmax) == 2\n # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe\n self.cutmix_alpha = 1.0\n self.mix_prob = prob\n self.switch_prob = switch_prob\n self.label_smoothing = label_smoothing\n self.num_classes = num_classes\n self.mode = mode\n assert self.mode in ['batch', 'pair', 'elem', 'pair2'], 'Invalid mode: {}'.format(self.mode)\n assert self.mode in ['pair2'], 'The mode of mixup should be `pair2` when saving logits'\n self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix\n self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop)\n\n def _params_per_elem(self, batch_size):\n lam = np.ones(batch_size, dtype=np.float32)\n use_cutmix = np.zeros(batch_size, dtype=np.bool)\n if self.mixup_enabled:\n if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:\n use_cutmix = np_random.rand(batch_size) < self.switch_prob\n lam_mix = np.where(\n use_cutmix,\n np_random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),\n np_random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))\n elif self.mixup_alpha > 0.:\n lam_mix = np_random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)\n elif self.cutmix_alpha > 0.:\n use_cutmix = np.ones(batch_size, dtype=np.bool)\n lam_mix = np_random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = np.where(np_random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)\n return lam, use_cutmix\n\n def _params_per_batch(self):\n lam = 1.\n use_cutmix = False\n if self.mixup_enabled and np_random.rand() < self.mix_prob:\n if self.mixup_alpha > 0. 
and self.cutmix_alpha > 0.:\n use_cutmix = np_random.rand() < self.switch_prob\n lam_mix = np_random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \\\n np_random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.mixup_alpha > 0.:\n lam_mix = np_random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.cutmix_alpha > 0.:\n use_cutmix = True\n lam_mix = np_random.beta(self.cutmix_alpha, self.cutmix_alpha)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = float(lam_mix)\n return lam, use_cutmix\n\n def _mix_elem(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_pair(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size // 2):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n x[j] = x[j] * lam + x_orig[i] * (1 - lam)\n lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_batch(self, x):\n lam, use_cutmix = self._params_per_batch()\n if lam == 1.:\n return 1.\n if use_cutmix:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]\n else:\n x_flipped = x.flip(0).mul_(1. - lam)\n x.mul_(lam).add_(x_flipped)\n return lam\n\n def _mix_pair2(self, x, seeds):\n assert seeds is not None, \"seeds must be provided when mode is `pair2` in mixup\"\n batch_size = len(x)\n lam_batch = np.ones(batch_size, dtype=np.float32)\n\n for i in range(0, batch_size, 2):\n # for each pair x[i] and x[i + 1]\n seed = int(seeds[i] ^ seeds[i + 1])\n with AugRandomContext(seed=seed):\n lam, use_cutmix = self._params_per_batch()\n lam_batch[i:i+2] = lam\n if lam == 1.:\n continue\n if use_cutmix:\n # cutmix\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i:i+2, :, yl:yh, xl:xh] = x[i:i+2].flip(0)[:, :, yl:yh, xl:xh]\n else:\n # mixup\n x_flipped = x[i:i+2].flip(0).mul_(1. 
- lam)\n x[i:i+2].mul_(lam).add_(x_flipped)\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def __call__(self, x, target, seeds=None):\n assert len(x) % 2 == 0, 'Batch size should be even when using this'\n if self.mode == 'elem':\n lam = self._mix_elem(x)\n elif self.mode == 'pair':\n lam = self._mix_pair(x)\n elif self.mode == 'pair2':\n lam = self._mix_pair2(x, seeds)\n else:\n lam = self._mix_batch(x)\n if target is not None:\n target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)\n return x, target"
},
{
"identifier": "DatasetWrapper",
"path": "data/augmentation/dataset_wrapper.py",
"snippet": "class DatasetWrapper(torch.utils.data.Dataset):\n def __init__(self, dataset, logits_path, topk, write):\n super().__init__()\n self.dataset = dataset\n self.logits_path = logits_path\n self.epoch = multiprocessing.Value('i', 0)\n self.topk = topk\n self.write_mode = write\n self.keys = self._get_keys()\n self._manager = (None, None)\n\n def __getitem__(self, index: int):\n if self.write_mode:\n return self.__getitem_for_write(index)\n return self.__getitem_for_read(index)\n\n def __getitem_for_write(self, index: int):\n # get an augmentation seed\n key = self.keys[index]\n seed = np.int32(np.random.randint(0, 1 << 31))\n with AugRandomContext(seed=int(seed)):\n item = self.dataset[index]\n return (item, (key, seed))\n\n def __getitem_for_read(self, index: int):\n key = self.keys[index]\n seed, logits_index, logits_value = self._get_saved_logits(key)\n with AugRandomContext(seed=seed):\n item = self.dataset[index]\n return (item, (logits_index, logits_value, np.int32(seed)))\n\n def _get_saved_logits(self, key: str):\n manager = self.get_manager()\n bstr: bytes = manager.read(key)\n # parse the augmentation seed\n seed = int(np.frombuffer(bstr[:4], dtype=np.int32))\n # parse the logits index and value\n # copy logits_index and logits_value to avoid warning of written flag from PyTorch\n bstr = bstr[4:]\n logits_index = np.frombuffer(\n bstr[:self.topk * 2], dtype=np.int16).copy()\n bstr = bstr[self.topk * 2:]\n logits_value = np.frombuffer(\n bstr[:self.topk * 2], dtype=np.float16).copy()\n return seed, logits_index, logits_value\n\n def _build_manager(self, logits_path: str):\n # topk * [idx, value] * 2 bytes for logits + 4 bytes for seed\n item_size = self.topk * 2 * 2 + 4\n rank = get_rank()\n return TxtManager(logits_path, item_size, rank)\n\n def set_epoch(self, epoch: int):\n self.epoch.value = epoch\n self._manager = (None, None)\n\n def get_manager(self):\n epoch = self.epoch.value\n if epoch != self._manager[0]:\n logits_path = os.path.join(\n self.logits_path, f'logits_top{self.topk}_epoch{self.epoch.value}')\n self._manager = (epoch, self._build_manager(logits_path))\n return self._manager[1]\n\n def __len__(self):\n return len(self.dataset)\n\n def _get_keys(self):\n if hasattr(self.dataset, 'get_keys'):\n keys = self.dataset.get_keys()\n if self.write_mode:\n # we only check key unique in the write mode\n assert len(keys) == len(set(keys)), 'keys must be unique'\n return keys\n return [str(i) for i in range(len(self))]"
},
{
"identifier": "MyDistributedSampler",
"path": "data/sampler.py",
"snippet": "class MyDistributedSampler(Sampler[T_co]):\n r\"\"\"Sampler that restricts data loading to a subset of the dataset.\n\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each\n process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a\n :class:`~torch.utils.data.DataLoader` sampler, and load a subset of the\n original dataset that is exclusive to it.\n\n .. note::\n Dataset is assumed to be of constant size and that any instance of it always\n returns the same elements in the same order.\n\n Args:\n dataset: Dataset used for sampling.\n num_replicas (int, optional): Number of processes participating in\n distributed training. By default, :attr:`world_size` is retrieved from the\n current distributed group.\n rank (int, optional): Rank of the current process within :attr:`num_replicas`.\n By default, :attr:`rank` is retrieved from the current distributed\n group.\n shuffle (bool, optional): If ``True`` (default), sampler will shuffle the\n indices.\n seed (int, optional): random seed used to shuffle the sampler if\n :attr:`shuffle=True`. This number should be identical across all\n processes in the distributed group. Default: ``0``.\n drop_last (bool, optional): if ``True``, then the sampler will drop the\n tail of the data to make it evenly divisible across the number of\n replicas. If ``False``, the sampler will add extra indices to make\n the data evenly divisible across the replicas. Default: ``False``.\n padding: (bool, optional): Whether to pad the dataset. Default: ``True``.\n pair: (bool, optional): Pair output for Mixup. Default: ``False``.\n\n .. warning::\n In distributed mode, calling the :meth:`set_epoch` method at\n the beginning of each epoch **before** creating the :class:`DataLoader` iterator\n is necessary to make shuffling work properly across multiple epochs. Otherwise,\n the same ordering will be always used.\n\n Example::\n\n >>> sampler = DistributedSampler(dataset) if is_distributed else None\n >>> loader = DataLoader(dataset, shuffle=(sampler is None),\n ... sampler=sampler)\n >>> for epoch in range(start_epoch, n_epochs):\n ... if is_distributed:\n ... sampler.set_epoch(epoch)\n ... 
train(loader)\n \"\"\"\n\n def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None,\n rank: Optional[int] = None, shuffle: bool = True,\n seed: int = 0, drop_last: bool = False,\n padding: bool = True,\n pair: bool = False) -> None:\n if num_replicas is None:\n if not dist.is_available():\n num_replicas = 1\n else:\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n rank = 0\n else:\n rank = dist.get_rank()\n if rank >= num_replicas or rank < 0:\n raise ValueError(\n \"Invalid rank {}, rank should be in the interval\"\n \" [0, {}]\".format(rank, num_replicas - 1))\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.drop_last = drop_last\n self.pair = pair\n self.padding = padding\n # If the dataset length is evenly divisible by # of replicas, then there\n # is no need to drop any data, since the dataset will be split equally.\n T = self.num_replicas if not self.pair else self.num_replicas * 2\n self.total_size = len(self.dataset)\n if self.padding:\n num_parts = self.total_size // T\n has_rest = bool(self.total_size % T)\n if self.drop_last:\n self.total_size = num_parts * T\n else:\n self.total_size = (num_parts + has_rest) * T\n self.num_samples = (\n self.total_size + self.num_replicas - 1) // self.num_replicas\n self.shuffle = shuffle\n self.seed = seed\n\n def __iter__(self) -> Iterator[T_co]:\n if self.shuffle:\n # deterministically shuffle based on epoch and seed\n g = torch.Generator()\n g.manual_seed(self.seed + self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g)\n else:\n indices = torch.arange(len(self.dataset))\n\n if not self.drop_last:\n # add extra samples to make it evenly divisible\n if self.padding:\n padding_size = self.total_size - len(indices)\n # pad to total_size\n if padding_size <= len(indices):\n indices = torch.cat(\n [indices, indices[:padding_size]], dim=0)\n else:\n repeat_times = (self.total_size +\n len(indices) - 1) // len(indices)\n indices = indices.repeat(repeat_times)[:self.total_size]\n else:\n # remove tail of data to make it evenly divisible.\n indices = indices[:self.total_size]\n assert len(indices) == self.total_size\n\n # subsample\n if self.pair:\n indices = indices.view(-1, 2)\n indices = indices[self.rank:self.total_size:self.num_replicas].flatten(\n ).tolist()\n assert len(indices) == self.num_samples or (\n not self.padding and len(indices) == self.num_samples - 1)\n\n return iter(indices)\n\n def __len__(self) -> int:\n return self.num_samples\n\n def set_epoch(self, epoch: int) -> None:\n r\"\"\"\n Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas\n use a different random ordering for each epoch. Otherwise, the next iteration of this\n sampler will yield the same ordering.\n\n Args:\n epoch (int): Epoch number.\n \"\"\"\n self.epoch = epoch"
}
] | import os
import torch
import numpy as np
import torch.distributed as dist
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data import Mixup
from timm.data import create_transform
from .augmentation import create_transform as create_transform_record
from .augmentation.mixup import Mixup as Mixup_record
from .augmentation.dataset_wrapper import DatasetWrapper
from .sampler import MyDistributedSampler
from timm.data import TimmDatasetTar
from timm.data import ImageDataset as TimmDatasetTar
from torchvision.transforms import InterpolationMode
from timm.data.transforms import _pil_interp | 5,663 | # --------------------------------------------------------
# TinyViT Data Builder
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Adapted for TinyViT
# --------------------------------------------------------
try:
    from timm.data import TimmDatasetTar
except ImportError:
    # for higher version of timm
    from timm.data import ImageDataset as TimmDatasetTar

try:
    from timm.data.transforms import _pil_interp
except ImportError:
    # newer timm releases dropped _pil_interp; rebuild it on top of torchvision
    from torchvision.transforms import InterpolationMode

    def _pil_interp(method):
        if method == 'bicubic':
            return InterpolationMode.BICUBIC
        elif method == 'lanczos':
            return InterpolationMode.LANCZOS
        elif method == 'hamming':
            return InterpolationMode.HAMMING
        else:
            # default bilinear, do we want to allow nearest?
            return InterpolationMode.BILINEAR

def build_loader(config):
config.defrost()
dataset_train, config.MODEL.NUM_CLASSES = build_dataset(
is_train=True, config=config)
config.freeze()
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
dataset_val, _ = build_dataset(is_train=False, config=config)
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
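    # paired sampling is only needed when mixup/cutmix is active and distillation is enabled (Mixup 'pair2' mode mixes consecutive index pairs)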
sampler_train = MyDistributedSampler(
dataset_train, shuffle=True,
drop_last=False, padding=True, pair=mixup_active and config.DISTILL.ENABLED,
)
sampler_val = MyDistributedSampler(
dataset_val, shuffle=False,
drop_last=False, padding=False, pair=False,
)
# TinyViT Dataset Wrapper
if config.DISTILL.ENABLED:
| # --------------------------------------------------------
# TinyViT Data Builder
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Adapted for TinyViT
# --------------------------------------------------------
try:
    from timm.data import TimmDatasetTar
except ImportError:
    # for higher version of timm
    from timm.data import ImageDataset as TimmDatasetTar

try:
    from timm.data.transforms import _pil_interp
except ImportError:
    # newer timm releases dropped _pil_interp; rebuild it on top of torchvision
    from torchvision.transforms import InterpolationMode

    def _pil_interp(method):
        if method == 'bicubic':
            return InterpolationMode.BICUBIC
        elif method == 'lanczos':
            return InterpolationMode.LANCZOS
        elif method == 'hamming':
            return InterpolationMode.HAMMING
        else:
            # default bilinear, do we want to allow nearest?
            return InterpolationMode.BILINEAR

def build_loader(config):
config.defrost()
dataset_train, config.MODEL.NUM_CLASSES = build_dataset(
is_train=True, config=config)
config.freeze()
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
dataset_val, _ = build_dataset(is_train=False, config=config)
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
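    # paired sampling is only needed when mixup/cutmix is active and distillation is enabled (Mixup 'pair2' mode mixes consecutive index pairs)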
sampler_train = MyDistributedSampler(
dataset_train, shuffle=True,
drop_last=False, padding=True, pair=mixup_active and config.DISTILL.ENABLED,
)
sampler_val = MyDistributedSampler(
dataset_val, shuffle=False,
drop_last=False, padding=False, pair=False,
)
# TinyViT Dataset Wrapper
if config.DISTILL.ENABLED: | dataset_train = DatasetWrapper(dataset_train, | 2 | 2023-11-03 09:54:45+00:00 | 8k |
fw-ai/fireworks_poe_bot | fireworks_poe_bot/fw_poe_qr_bot.py | [
{
"identifier": "PoeBot",
"path": "fireworks_poe_bot/fastapi_poe/base.py",
"snippet": "class PoeBot:\n # Override these for your bot\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n \"\"\"Override this to return a response to user queries.\"\"\"\n yield self.text_event(\"hello\")\n\n async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:\n \"\"\"Override this to return non-standard settings.\"\"\"\n return SettingsResponse()\n\n async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:\n \"\"\"Override this to record feedback from the user.\"\"\"\n pass\n\n async def on_error(self, error_request: ReportErrorRequest) -> None:\n \"\"\"Override this to record errors from the Poe server.\"\"\"\n logger.error(f\"Error from Poe server: {error_request}\")\n\n # Helpers for generating responses\n @staticmethod\n def text_event(text: str) -> ServerSentEvent:\n return ServerSentEvent(data=json.dumps({\"text\": text}), event=\"text\")\n\n @staticmethod\n def replace_response_event(text: str) -> ServerSentEvent:\n return ServerSentEvent(\n data=json.dumps({\"text\": text}), event=\"replace_response\"\n )\n\n @staticmethod\n def done_event() -> ServerSentEvent:\n return ServerSentEvent(data=\"{}\", event=\"done\")\n\n @staticmethod\n def suggested_reply_event(text: str) -> ServerSentEvent:\n return ServerSentEvent(data=json.dumps({\"text\": text}), event=\"suggested_reply\")\n\n @staticmethod\n def meta_event(\n *,\n content_type: ContentType = \"text/markdown\",\n refetch_settings: bool = False,\n linkify: bool = True,\n suggested_replies: bool = True,\n ) -> ServerSentEvent:\n return ServerSentEvent(\n data=json.dumps(\n {\n \"content_type\": content_type,\n \"refetch_settings\": refetch_settings,\n \"linkify\": linkify,\n \"suggested_replies\": suggested_replies,\n }\n ),\n event=\"meta\",\n )\n\n @staticmethod\n def error_event(\n text: Optional[str] = None, *, allow_retry: bool = True\n ) -> ServerSentEvent:\n data: Dict[str, Union[bool, str]] = {\"allow_retry\": allow_retry}\n if text is not None:\n data[\"text\"] = text\n return ServerSentEvent(data=json.dumps(data), event=\"error\")\n\n # Internal handlers\n\n async def handle_report_feedback(\n self, feedback_request: ReportFeedbackRequest\n ) -> JSONResponse:\n await self.on_feedback(feedback_request)\n return JSONResponse({})\n\n async def handle_report_error(\n self, error_request: ReportErrorRequest\n ) -> JSONResponse:\n await self.on_error(error_request)\n return JSONResponse({})\n\n async def handle_settings(self, settings_request: SettingsRequest) -> JSONResponse:\n settings = await self.get_settings(settings_request)\n return JSONResponse(settings.dict())\n\n async def handle_query(self, query: QueryRequest) -> AsyncIterable[ServerSentEvent]:\n try:\n async for event in self.get_response(query):\n if isinstance(event, ServerSentEvent):\n yield event\n elif isinstance(event, ErrorResponse):\n yield self.error_event(event.text, allow_retry=event.allow_retry)\n elif isinstance(event, MetaResponse):\n yield self.meta_event(\n content_type=event.content_type,\n refetch_settings=event.refetch_settings,\n linkify=event.linkify,\n suggested_replies=event.suggested_replies,\n )\n elif event.is_suggested_reply:\n yield self.suggested_reply_event(event.text)\n elif event.is_replace_response:\n yield self.replace_response_event(event.text)\n else:\n yield self.text_event(event.text)\n except Exception as e:\n logger.exception(\"Error responding to query\")\n yield self.error_event(repr(e), 
allow_retry=False)\n yield self.done_event()"
},
{
"identifier": "PartialResponse",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class PartialResponse(BaseModel):\n \"\"\"Representation of a (possibly partial) response from a bot.\"\"\"\n\n text: str\n \"\"\"Partial response text.\n\n If the final bot response is \"ABC\", you may see a sequence\n of PartialResponse objects like PartialResponse(text=\"A\"),\n PartialResponse(text=\"B\"), PartialResponse(text=\"C\").\n\n \"\"\"\n\n raw_response: object = None\n \"\"\"For debugging, the raw response from the bot.\"\"\"\n\n full_prompt: Optional[str] = None\n \"\"\"For debugging, contains the full prompt as sent to the bot.\"\"\"\n\n request_id: Optional[str] = None\n \"\"\"May be set to an internal identifier for the request.\"\"\"\n\n is_suggested_reply: bool = False\n \"\"\"If true, this is a suggested reply.\"\"\"\n\n is_replace_response: bool = False\n \"\"\"If true, this text should completely replace the previous bot text.\"\"\""
},
{
"identifier": "QueryRequest",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class QueryRequest(BaseRequest):\n \"\"\"Request parameters for a query request.\"\"\"\n\n query: List[ProtocolMessage]\n user_id: Identifier\n conversation_id: Identifier\n message_id: Identifier\n metadata: Identifier = \"\"\n api_key: str = \"<missing>\"\n access_key: str = \"<missing>\"\n temperature: float = 0.7\n skip_system_prompt: bool = False\n logit_bias: Dict[str, float] = {}\n stop_sequences: List[str] = []"
},
{
"identifier": "ReportErrorRequest",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class ReportErrorRequest(BaseRequest):\n \"\"\"Request parameters for a report_error request.\"\"\"\n\n message: str\n metadata: Dict[str, Any]"
},
{
"identifier": "ReportFeedbackRequest",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class ReportFeedbackRequest(BaseRequest):\n \"\"\"Request parameters for a report_feedback request.\"\"\"\n\n message_id: Identifier\n user_id: Identifier\n conversation_id: Identifier\n feedback_type: FeedbackType"
},
{
"identifier": "SettingsRequest",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class SettingsRequest(BaseRequest):\n \"\"\"Request parameters for a settings request.\"\"\""
},
{
"identifier": "SettingsResponse",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class SettingsResponse(BaseModel):\n context_clear_window_secs: Optional[int] = None # deprecated\n allow_user_context_clear: bool = True # deprecated\n server_bot_dependencies: Dict[str, int] = Field(default_factory=dict)\n allow_attachments: bool = False\n introduction_message: str = \"\""
},
{
"identifier": "ErrorResponse",
"path": "fireworks_poe_bot/fastapi_poe/types.py",
"snippet": "class ErrorResponse(PartialResponse):\n \"\"\"Communicate errors from server bots.\"\"\"\n\n allow_retry: bool = False"
},
{
"identifier": "log_error",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "@abstractmethod\ndef log_error(self, payload: Dict[str, Any]):\n ..."
},
{
"identifier": "log_info",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "@abstractmethod\ndef log_info(self, payload: Dict[str, Any]):\n ..."
},
{
"identifier": "log_warn",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "@abstractmethod\ndef log_warn(self, payload: Dict[str, Any]):\n ..."
},
{
"identifier": "register_bot_plugin",
"path": "fireworks_poe_bot/plugin.py",
"snippet": "def register_bot_plugin(config_key: str, BotConfigClass: type = ModelConfig):\n def decorator(BotPluginClass: type):\n BOT_PLUGINS.append(_BotPlugin(\n BotPluginClass=BotPluginClass,\n BotConfigClass=BotConfigClass,\n config_key=config_key,\n ))\n\n return decorator"
},
{
"identifier": "ModelConfig",
"path": "fireworks_poe_bot/config.py",
"snippet": "class ModelConfig(BaseModel):\n model: str\n api_key: str\n\n SERVER_endpoint_account_override: Optional[str] = None\n SERVER_endpoint_model_override: Optional[str] = None\n\n @property\n def model_fqn(self):\n if (\n self.SERVER_endpoint_account_override is not None\n or self.SERVER_endpoint_model_override is not None\n ):\n _, account, _, model = self.model.split(\"/\")\n account = self.SERVER_endpoint_account_override or account\n model = self.SERVER_endpoint_model_override or model\n return f\"accounts/{account}/models/{model}\"\n else:\n return self.model"
}
] | import base64
import copy
import io
import fireworks.client
import time
import uuid
import requests
import qrcode
import traceback
from typing import AsyncIterable, Dict, List, Optional, Union
from .fastapi_poe import PoeBot
from sse_starlette.sse import ServerSentEvent
from .fastapi_poe.types import (
PartialResponse,
QueryRequest,
ReportErrorRequest,
ReportFeedbackRequest,
SettingsRequest,
SettingsResponse,
ErrorResponse,
)
from fireworks.client.api import ChatMessage
from fireworks.client.error import InvalidRequestError
from fireworks.client.image import ImageInference, Answer
from fireworks_poe_bot.plugin import log_error, log_info, log_warn, register_bot_plugin
from fireworks_poe_bot.config import ModelConfig
from itertools import groupby
from PIL import Image
from google.cloud import storage | 4,446 | "qr_data": qr_data,
"qr_strength": qr_strength,
"prompt_strength": prompt_strength,
"response": response_text,
"elapsed_sec": elapsed_sec,
"elapsed_sec_inference": end_t_inference - start_t,
"elapsed_sec_upload": end_t - start_t_encode,
}
)
yield PartialResponse(text=response_text)
yield ServerSentEvent(event="done")
return
except Exception as e:
end_t = time.time()
log_error(
{
"severity": "ERROR",
"msg": "Invalid request",
"error": "\n".join(traceback.format_exception(e)),
"elapsed_sec": end_t - start_t,
**query.dict(),
}
)
if "prompt is too long" in str(e):
error_type = "user_message_too_long"
else:
error_type = None
yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))
return
finally:
fireworks.client.api_key = orig_api_key
# Function to upload a PIL Image to an S3 bucket with a presigned URL
def _upload_image_to_s3_with_ttl(
self, bucket_name, object_name, image: Image, expiration=600
):
"""
Upload a PIL Image to an S3 bucket with TTL by generating a presigned URL.
:param bucket_name: String name of the bucket to which the image is uploaded.
:param object_name: S3 object name. If not specified then file_name is used.
:param image: PIL Image object to be uploaded.
:param expiration: Time in seconds for the presigned URL to remain valid.
"""
# In-memory binary streams
in_mem_file = io.BytesIO()
# Save the PIL image to in-memory file as JPEG
image.save(in_mem_file, format="JPEG")
in_mem_file.seek(0) # Reset file pointer to the beginning
# Upload the image to S3
# self.s3_client.upload_fileobj(in_mem_file, bucket_name, object_name)
self.s3_client.put_object(
Bucket=self.s3_bucket_name,
Key=object_name,
Body=in_mem_file,
ContentType="image/jpeg",
)
# Generate a presigned URL for the S3 object
url = self.s3_client.generate_presigned_url(
"get_object",
Params={"Bucket": bucket_name, "Key": object_name},
ExpiresIn=expiration,
)
return url
def _upload_image_to_gcs(self, image: Image, bucket_name: str):
"""Uploads a given PIL.Image to a GCS bucket."""
# Generate a (statistically) unique filename with a uuid4
random_uuid = str(uuid.uuid4()).replace("-", "")
filename = f"{random_uuid}.jpg"
# Initialize the GCS client
client = storage.Client()
bucket = client.get_bucket(bucket_name)
# Convert the PIL.Image to bytes
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format="JPEG")
img_byte_arr = img_byte_arr.getvalue()
# Create a new blob (i.e., object) in the bucket and upload the image bytes
blob = bucket.blob(filename)
blob.upload_from_string(img_byte_arr, content_type=f"image/jpeg")
blob.make_public()
# The public URL can be accessed with the `public_url` attribute
public_url = blob.public_url
return public_url
def _download_image(self, image_url):
# Send an HTTP GET request to the image URL
response = requests.get(image_url)
# Check if the request was successful
if response.status_code == 200:
# Read the image content into an in-memory bytes buffer
image_bytes = io.BytesIO(response.content)
# Use Pillow to open the image from the bytes buffer
img = Image.open(image_bytes)
return img
else:
# If the request failed, raise an HTTPError with the response
response.raise_for_status()
async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:
"""Override this to return non-standard settings."""
return SettingsResponse()
async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:
"""Override this to record feedback from the user."""
pass
|
def parse_input(input_string, default_qr_strength, default_prompt_strength):
# Parse initial prompt
prompt_end_index = input_string.find('--')
if prompt_end_index == -1:
prompt_end_index = len(input_string)
prompt = input_string[:prompt_end_index].strip() if prompt_end_index != -1 else input_string.strip()
input_string = input_string[prompt_end_index:].strip()
qr_prompt = None
qr_strength = default_qr_strength
prompt_strength = default_prompt_strength
model = "sdxl"
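    # strengths fall back to the bot-level defaults passed in; "sdxl" is used unless a --model flag overrides it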
while len(input_string) > 0:
next_flag_idx = input_string.find('--', 2)
if next_flag_idx == -1:
next_flag_idx = len(input_string)
# Parse the flag and its arguments
if input_string.startswith('--qr-strength'):
qr_strength = float(input_string[len("--qr-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--qr'):
qr_prompt = input_string[len("--qr"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--prompt-strength'):
prompt_strength = int(input_string[len("--prompt-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--model'):
model = input_string[len("--model"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
else:
raise ValueError(f'Unknown flag: {input_string[:next_flag_idx]}')
if qr_prompt is None:
raise ValueError('Please specify a QR prompt with a --qr flag.')
return prompt, qr_prompt, qr_strength, prompt_strength, model
def gen_qr_code(input_text: str) -> Image:
# Generate QR Code
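    # error correction level H tolerates roughly 30% damage, so heavy stylization by the diffusion model can still leave a scannable code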
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=16,
border=4,
)
qr.add_data(input_text)
qr.make(fit=True)
# Create QR Code image
img = qr.make_image(fill_color="black", back_color="white")
# Padding the image to be 768x768
width, height = img.size
new_width = new_height = 768
# Create a new image with white background
new_img = Image.new("RGB", (new_width, new_height), "white")
# Paste the original image onto the new image, centered
new_img.paste(img, ((new_width - width) // 2, (new_height - height) // 2))
return new_img
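# Bot-specific settings: the GCS bucket that hosts generated images plus defaults
# for the ControlNet conditioning strength and the classifier-free-guidance scale.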
class QRCodeConfig(ModelConfig):
gcs_bucket_name: str
conditioning_scale: Optional[float] = None
default_cfg_scale: Optional[float] = None
@register_bot_plugin("qr_models", QRCodeConfig)
class FireworksPoeQRBot(PoeBot):
def __init__(
self,
model: str,
api_key: str,
environment: str,
deployment: str,
server_version: str,
gcs_bucket_name: str,
conditioning_scale: float,
default_cfg_scale: float,
):
super().__init__()
self.model = model
self.api_key = api_key
self.environment = environment
self.deployment = deployment
self.server_version = server_version
self.default_cfg_scale = default_cfg_scale if default_cfg_scale is not None else 8
model_atoms = model.split("/")
if len(model_atoms) != 4:
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
if model_atoms[0] != "accounts" or model_atoms[2] != "models":
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
self.account = model_atoms[1]
self.model = model_atoms[3]
self.client = ImageInference(account=self.account, model=self.model)
self.gcs_bucket_name = gcs_bucket_name
self.conditioning_scale = conditioning_scale
def _log_warn(self, payload: Dict):
payload = copy.copy(payload)
payload.update(
{
"severity": "WARNING",
"environment": self.environment,
"deployment": self.deployment,
"model": self.model,
"server_version": self.server_version,
}
)
log_warn(payload)
def _log_info(self, payload: Dict):
payload = copy.copy(payload)
payload.update(
{
"severity": "INFO",
"environment": self.environment,
"deployment": self.deployment,
"model": self.model,
"server_version": self.server_version,
}
)
log_info(payload)
async def get_response(
self, query: QueryRequest
) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:
orig_api_key = self.client.api_key
fireworks.client.api_key = self.api_key
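        # temporarily swap in this bot's Fireworks API key; the original key is restored in the finally block below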
try:
start_t = time.time()
if len(query.query) == 0:
yield ErrorResponse(allow_retry=False, text="Empty query")
raise
messages: List[ChatMessage] = []
for protocol_message in query.query:
# OpenAI/Fireworks use the "assistant" role for the LLM, but Poe uses the
# "bot" role. Replace that one. Otherwise, ignore the role
if protocol_message.role not in {"system", "user", "bot"}:
self._log_warn({"msg": "Unknown role", **protocol_message})
continue
if protocol_message.content_type not in {"text/plain", "text/markdown"}:
self._log_warn({"msg": "Unknown content type", **protocol_message})
continue
# TODO: support protocol_message.feedback and protocol_message.attachments
# if needed
if protocol_message.role == "bot":
role = "assistant"
else:
role = protocol_message.role
messages.append({"role": role, "content": protocol_message.content})
self._log_info(
{
"msg": "Request received",
**query.dict(),
}
)
# The poe servers send us arbitrary lists of messages. We need to do a few things
# to normalize for our chat completion API:
# 1. Ensure that all assistant messages are preceded by a user message
# 2. Merge adjacent messages from the same role
# 3. Ensure that the last message is a user message
# Ensure that all assistant messages are preceded by a user message
for i in range(len(messages) - 1, -1, -1):
if messages[i]["role"] == "assistant" and (
i == 0 or messages[i - 1]["role"] != "user"
):
self._log_warn(
{
"msg": f"Assistant message {messages[i]} not preceded by user message"
}
)
messages.insert(i, {"role": "user", "content": ""})
# Merge adjacent messages from the same role
merged_messages = []
for role, group in groupby(messages, key=lambda x: x["role"]):
content = " ".join(message["content"] for message in group)
merged_messages.append({"role": role, "content": content})
messages = merged_messages
# Ensure last message is a user message
if messages[-1]["role"] != "user":
self._log_warn({"msg": f"Last message {messages[-1]} not a user message"})
messages.append({"role": "user", "content": ""})
# generated_len = 0
assert messages[-1]["role"] == "user"
prompt = messages[-1]["content"]
try:
prompt, qr_data, qr_strength, prompt_strength, model = parse_input(prompt, self.conditioning_scale, self.default_cfg_scale)
except Exception as e:
yield self.text_event(text=f"Error parsing input: {e}")
return
if model == "sdxl":
self.client.model = "stable-diffusion-xl-1024-v1-0"
elif model == "sdv1.5":
self.client.model = "stable-diffusion-v1-5"
else:
yield self.text_event(text=f"Unknown model: {model}. Model must be one of 'sdxl' or 'sdv1.5'.")
return
qr_image = gen_qr_code(qr_data)
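            # ControlNet-guided generation: the QR bitmap constrains the structure while the text prompt drives the imagery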
answer: Answer = await self.client.control_net_async(
control_image=qr_image,
control_net_name="qr",
conditioning_scale=qr_strength,
prompt=prompt,
cfg_scale=prompt_strength,
sampler=None,
steps=25,
seed=0,
safety_check=False,
output_image_format="JPG",
# Add additional parameters here as necessary
)
end_t_inference = time.time()
start_t_encode = time.time()
if answer.finish_reason == "CONTENT_FILTERED":
yield self.text_event(text="Potentially sensitive content detected")
return
public_image_url = self._upload_image_to_gcs(
answer.image, self.gcs_bucket_name
)
            response_text = f"![{prompt}]({public_image_url})"  # embed the uploaded image as a markdown link so it renders in the reply
end_t = time.time()
elapsed_sec = end_t - start_t
self._log_info(
{
"severity": "INFO",
"msg": "Request completed",
**query.dict(),
"prompt": prompt,
"qr_data": qr_data,
"qr_strength": qr_strength,
"prompt_strength": prompt_strength,
"response": response_text,
"elapsed_sec": elapsed_sec,
"elapsed_sec_inference": end_t_inference - start_t,
"elapsed_sec_upload": end_t - start_t_encode,
}
)
yield PartialResponse(text=response_text)
yield ServerSentEvent(event="done")
return
except Exception as e:
end_t = time.time()
log_error(
{
"severity": "ERROR",
"msg": "Invalid request",
"error": "\n".join(traceback.format_exception(e)),
"elapsed_sec": end_t - start_t,
**query.dict(),
}
)
if "prompt is too long" in str(e):
error_type = "user_message_too_long"
else:
error_type = None
yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))
return
finally:
fireworks.client.api_key = orig_api_key
# Function to upload a PIL Image to an S3 bucket with a presigned URL
def _upload_image_to_s3_with_ttl(
self, bucket_name, object_name, image: Image, expiration=600
):
"""
Upload a PIL Image to an S3 bucket with TTL by generating a presigned URL.
:param bucket_name: String name of the bucket to which the image is uploaded.
:param object_name: S3 object name. If not specified then file_name is used.
:param image: PIL Image object to be uploaded.
:param expiration: Time in seconds for the presigned URL to remain valid.
"""
# In-memory binary streams
in_mem_file = io.BytesIO()
# Save the PIL image to in-memory file as JPEG
image.save(in_mem_file, format="JPEG")
in_mem_file.seek(0) # Reset file pointer to the beginning
# Upload the image to S3
# self.s3_client.upload_fileobj(in_mem_file, bucket_name, object_name)
self.s3_client.put_object(
Bucket=self.s3_bucket_name,
Key=object_name,
Body=in_mem_file,
ContentType="image/jpeg",
)
# Generate a presigned URL for the S3 object
url = self.s3_client.generate_presigned_url(
"get_object",
Params={"Bucket": bucket_name, "Key": object_name},
ExpiresIn=expiration,
)
return url
def _upload_image_to_gcs(self, image: Image, bucket_name: str):
"""Uploads a given PIL.Image to a GCS bucket."""
# Generate a (statistically) unique filename with a uuid4
random_uuid = str(uuid.uuid4()).replace("-", "")
filename = f"{random_uuid}.jpg"
# Initialize the GCS client
client = storage.Client()
bucket = client.get_bucket(bucket_name)
# Convert the PIL.Image to bytes
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format="JPEG")
img_byte_arr = img_byte_arr.getvalue()
# Create a new blob (i.e., object) in the bucket and upload the image bytes
blob = bucket.blob(filename)
blob.upload_from_string(img_byte_arr, content_type=f"image/jpeg")
blob.make_public()
# The public URL can be accessed with the `public_url` attribute
public_url = blob.public_url
return public_url
def _download_image(self, image_url):
# Send an HTTP GET request to the image URL
response = requests.get(image_url)
# Check if the request was successful
if response.status_code == 200:
# Read the image content into an in-memory bytes buffer
image_bytes = io.BytesIO(response.content)
# Use Pillow to open the image from the bytes buffer
img = Image.open(image_bytes)
return img
else:
# If the request failed, raise an HTTPError with the response
response.raise_for_status()
async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:
"""Override this to return non-standard settings."""
return SettingsResponse()
async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:
"""Override this to record feedback from the user."""
pass
| async def on_error(self, error_request: ReportErrorRequest) -> None: | 3 | 2023-11-03 23:24:23+00:00 | 8k |
Fsoft-AIC/LSDM | atiss/scene_synthesis/datasets/threed_front.py | [
{
"identifier": "BaseDataset",
"path": "atiss/scene_synthesis/datasets/common.py",
"snippet": "class BaseDataset(Dataset):\n \"\"\"Implements the interface for all datasets that consist of scenes.\"\"\"\n def __init__(self, scenes):\n assert len(scenes) > 0\n self.scenes = scenes\n\n def __len__(self):\n return len(self.scenes)\n\n def __getitem__(self, idx):\n return self.scenes[idx]\n\n @property\n def class_labels(self):\n raise NotImplementedError()\n\n @property\n def n_classes(self):\n return len(self.class_labels)\n\n @property\n def object_types(self):\n raise NotImplementedError()\n\n @property\n def n_object_types(self):\n \"\"\"The number of distinct objects contained in the scenes.\"\"\"\n return len(self.object_types)\n\n @property\n def room_types(self):\n return set([si.scene_type for si in self.scenes])\n\n @property\n def count_objects_in_rooms(self):\n return Counter([len(si.bboxes) for si in self.scenes])\n\n def post_process(self, s):\n return s\n\n @staticmethod\n def with_valid_scene_ids(invalid_scene_ids):\n def inner(scene):\n return scene if scene.scene_id not in invalid_scene_ids else False\n return inner\n\n @staticmethod\n def with_scene_ids(scene_ids):\n def inner(scene):\n return scene if scene.scene_id in scene_ids else False\n return inner\n\n @staticmethod\n def with_room(scene_type):\n def inner(scene):\n return scene if scene_type in scene.scene_type else False\n return inner\n\n @staticmethod\n def room_smaller_than_along_axis(max_size, axis=1):\n def inner(scene):\n return scene if scene.bbox[1][axis] <= max_size else False\n return inner\n\n @staticmethod\n def room_larger_than_along_axis(min_size, axis=1):\n def inner(scene):\n return scene if scene.bbox[0][axis] >= min_size else False\n return inner\n\n @staticmethod\n def floor_plan_with_limits(limit_x, limit_y, axis=[0, 2]):\n def inner(scene):\n min_bbox, max_bbox = scene.floor_plan_bbox\n t_x = max_bbox[axis[0]] - min_bbox[axis[0]]\n t_y = max_bbox[axis[1]] - min_bbox[axis[1]]\n if t_x <= limit_x and t_y <= limit_y:\n return scene\n else:\n False\n return inner\n\n @staticmethod\n def with_valid_boxes(box_types):\n def inner(scene):\n for i in range(len(scene.bboxes)-1, -1, -1):\n if scene.bboxes[i].label not in box_types:\n scene.bboxes.pop(i)\n return scene\n return inner\n\n @staticmethod\n def without_box_types(box_types):\n def inner(scene):\n for i in range(len(scene.bboxes)-1, -1, -1):\n if scene.bboxes[i].label in box_types:\n scene.bboxes.pop(i)\n return scene\n return inner\n\n @staticmethod\n def with_generic_classes(box_types_map):\n def inner(scene):\n for box in scene.bboxes:\n # Update the box label based on the box_types_map\n box.label = box_types_map[box.label]\n return scene\n return inner\n\n @staticmethod\n def with_valid_bbox_jids(invalid_bbox_jds):\n def inner(scene):\n return (\n False if any(b.model_jid in invalid_bbox_jds for b in scene.bboxes)\n else scene\n )\n return inner\n\n @staticmethod\n def at_most_boxes(n):\n def inner(scene):\n return scene if len(scene.bboxes) <= n else False\n return inner\n\n @staticmethod\n def at_least_boxes(n):\n def inner(scene):\n return scene if len(scene.bboxes) >= n else False\n return inner\n\n @staticmethod\n def with_object_types(objects):\n def inner(scene):\n return (\n scene if all(b.label in objects for b in scene.bboxes)\n else False\n )\n return inner\n\n @staticmethod\n def contains_object_types(objects):\n def inner(scene):\n return (\n scene if any(b.label in objects for b in scene.bboxes)\n else False\n )\n return inner\n\n @staticmethod\n def without_object_types(objects):\n def inner(scene):\n 
return (\n False if any(b.label in objects for b in scene.bboxes)\n else scene\n )\n return inner\n\n @staticmethod\n def filter_compose(*filters):\n def inner(scene):\n s = scene\n fs = iter(filters)\n try:\n while s:\n s = next(fs)(s)\n except StopIteration:\n pass\n return s\n return inner"
},
{
"identifier": "Room",
"path": "atiss/scene_synthesis/datasets/threed_front_scene.py",
"snippet": "class Room(BaseScene):\n def __init__(\n self, scene_id, scene_type, bboxes, extras, json_path,\n path_to_room_masks_dir=None\n ):\n super().__init__(scene_id, scene_type, bboxes)\n self.json_path = json_path\n self.extras = extras\n\n self.uid = \"_\".join([self.json_path, scene_id])\n self.path_to_room_masks_dir = path_to_room_masks_dir\n if path_to_room_masks_dir is not None:\n self.path_to_room_mask = os.path.join(\n self.path_to_room_masks_dir, self.uid, \"room_mask.png\"\n )\n else:\n self.path_to_room_mask = None\n\n @property\n def floor(self):\n return [ei for ei in self.extras if ei.model_type == \"Floor\"][0]\n\n @property\n @lru_cache(maxsize=512)\n def bbox(self):\n corners = np.empty((0, 3))\n for f in self.bboxes:\n corners = np.vstack([corners, f.corners()])\n return np.min(corners, axis=0), np.max(corners, axis=0)\n\n @cached_property\n def bboxes_centroid(self):\n a, b = self.bbox\n return (a+b)/2\n\n @property\n def furniture_in_room(self):\n return [f.label for f in self.bboxes]\n\n @property\n def floor_plan(self):\n def cat_mesh(m1, m2):\n v1, f1 = m1\n v2, f2 = m2\n v = np.vstack([v1, v2])\n f = np.vstack([f1, f2 + len(v1)])\n return v, f\n\n # Compute the full floor plan\n vertices, faces = reduce(\n cat_mesh,\n ((ei.xyz, ei.faces) for ei in self.extras if ei.model_type == \"Floor\")\n )\n return np.copy(vertices), np.copy(faces)\n\n @cached_property\n def floor_plan_bbox(self):\n vertices, faces = self.floor_plan\n return np.min(vertices, axis=0), np.max(vertices, axis=0)\n\n @cached_property\n def floor_plan_centroid(self):\n a, b = self.floor_plan_bbox\n return (a+b)/2\n\n @cached_property\n def centroid(self):\n return self.floor_plan_centroid\n\n @property\n def count_furniture_in_room(self):\n return Counter(self.furniture_in_room)\n\n @property\n def room_mask(self):\n return self.room_mask_rotated(0)\n\n def room_mask_rotated(self, angle=0):\n # The angle is in rad\n im = Image.open(self.path_to_room_mask).convert(\"RGB\")\n # Downsample the room_mask image by applying bilinear interpolation\n im = im.rotate(angle * 180 / np.pi, resample=Image.BICUBIC)\n\n return np.asarray(im).astype(np.float32) / np.float32(255)\n\n def category_counts(self, class_labels):\n \"\"\"List of category counts in the room\n \"\"\"\n print(class_labels)\n if \"start\" in class_labels and \"end\" in class_labels:\n class_labels = class_labels[:-2]\n category_counts = [0]*len(class_labels)\n\n for di in self.furniture_in_room:\n category_counts[class_labels.index(di)] += 1\n return category_counts\n\n def ordered_bboxes_with_centroid(self):\n centroids = np.array([f.centroid(-self.centroid) for f in self.bboxes])\n ordering = np.lexsort(centroids.T)\n ordered_bboxes = [self.bboxes[i] for i in ordering]\n\n return ordered_bboxes\n\n def ordered_bboxes_with_class_labels(self, all_labels):\n centroids = np.array([f.centroid(-self.centroid) for f in self.bboxes])\n int_labels = np.array(\n [[f.int_label(all_labels)] for f in self.bboxes]\n )\n ordering = np.lexsort(np.hstack([centroids, int_labels]).T)\n ordered_bboxes = [self.bboxes[i] for i in ordering]\n\n return ordered_bboxes\n\n def ordered_bboxes_with_class_frequencies(self, class_order):\n centroids = np.array([f.centroid(-self.centroid) for f in self.bboxes])\n label_order = np.array([\n [class_order[f.label]] for f in self.bboxes\n ])\n ordering = np.lexsort(np.hstack([centroids, label_order]).T)\n ordered_bboxes = [self.bboxes[i] for i in ordering[::-1]]\n\n return ordered_bboxes\n\n def furniture_renderables(\n 
self,\n colors=(0.5, 0.5, 0.5),\n with_bbox_corners=False,\n with_origin=False,\n with_bboxes=False,\n with_objects_offset=False,\n with_floor_plan_offset=False,\n with_floor_plan=False,\n with_texture=False\n ):\n if with_objects_offset:\n offset = -self.bboxes_centroid\n elif with_floor_plan_offset:\n offset = -self.floor_plan_centroid\n else:\n offset = [[0, 0, 0]]\n\n renderables = [\n f.mesh_renderable(\n colors=colors, offset=offset, with_texture=with_texture\n )\n for f in self.bboxes\n ]\n if with_origin:\n renderables += [f.origin_renderable(offset) for f in self.bboxes]\n if with_bbox_corners:\n for f in self.bboxes:\n renderables += [f.bbox_corners_renderable(offset=offset)]\n if with_bboxes:\n for f in self.bboxes:\n renderables += [f.bbox_renderable(offset=offset)]\n if with_floor_plan:\n vertices, faces = self.floor_plan\n vertices = vertices + offset\n renderables += [\n Mesh.from_faces(vertices, faces, colors=(0.8, 0.8, 0.8, 0.6))\n ]\n return renderables\n\n def show(\n self,\n behaviours=[LightToCamera(), SnapshotOnKey()],\n with_bbox_corners=False,\n with_bboxes=False,\n with_objects_offset=False,\n with_floor_plan_offset=False,\n with_floor_plan=False,\n background=(1.0, 1.0, 1.0, 1.0),\n camera_target=(0, 0, 0),\n camera_position=(-2, -2, -2),\n up_vector=(0, 0, 1),\n window_size=(512, 512)\n ):\n renderables = self.furniture_renderables(\n with_bbox_corners=with_bbox_corners,\n with_bboxes=with_bboxes,\n with_objects_offset=with_objects_offset,\n with_floor_plan_offset=with_floor_plan_offset,\n with_floor_plan=with_floor_plan\n )\n show(\n renderables, behaviours=behaviours,\n size=window_size, camera_position=camera_position,\n camera_target=camera_target, up_vector=up_vector,\n background=background\n )\n\n def augment_room(self, objects_dataset):\n bboxes = self.bboxes\n # Randomly pick an asset to be augmented\n bi = np.random.choice(self.bboxes)\n query_label = bi.label\n query_size = bi.size + np.random.normal(0, 0.02)\n # Retrieve the new asset based on the size of the picked asset\n furniture = objects_dataset.get_closest_furniture_to_box(\n query_label, query_size\n )\n bi_retrieved = bi.copy_from_other_model(furniture)\n\n new_bboxes = [\n box for box in bboxes if not box == bi\n ] + [bi_retrieved]\n\n return Room(\n scene_id=self.scene_id + \"_augm\",\n scene_type=self.scene_type,\n bboxes=new_bboxes,\n extras=self.extras,\n json_path=self.json_path,\n path_to_room_masks_dir=self.path_to_room_masks_dir\n )"
},
{
"identifier": "parse_threed_front_scenes",
"path": "atiss/scene_synthesis/datasets/utils.py",
"snippet": "def parse_threed_front_scenes(\n dataset_directory, path_to_model_info, path_to_models,\n path_to_room_masks_dir=None\n):\n if os.getenv(\"PATH_TO_SCENES\"):\n scenes = pickle.load(open(os.getenv(\"PATH_TO_SCENES\"), \"rb\"))\n else:\n # Parse the model info\n mf = ModelInfo.from_file(path_to_model_info)\n model_info = mf.model_info\n\n path_to_scene_layouts = [\n os.path.join(dataset_directory, f)\n for f in sorted(os.listdir(dataset_directory))\n if f.endswith(\".json\")\n ]\n scenes = []\n unique_room_ids = set()\n # Start parsing the dataset\n print(\"Loading dataset \", end=\"\")\n for i, m in enumerate(path_to_scene_layouts):\n with open(m) as f:\n data = json.load(f)\n # Parse the furniture of the scene\n furniture_in_scene = defaultdict()\n for ff in data[\"furniture\"]:\n if \"valid\" in ff and ff[\"valid\"]:\n furniture_in_scene[ff[\"uid\"]] = dict(\n model_uid=ff[\"uid\"],\n model_jid=ff[\"jid\"],\n model_info=model_info[ff[\"jid\"]]\n )\n\n # Parse the extra meshes of the scene e.g walls, doors,\n # windows etc.\n meshes_in_scene = defaultdict()\n for mm in data[\"mesh\"]:\n meshes_in_scene[mm[\"uid\"]] = dict(\n mesh_uid=mm[\"uid\"],\n mesh_jid=mm[\"jid\"],\n mesh_xyz=np.asarray(mm[\"xyz\"]).reshape(-1, 3),\n mesh_faces=np.asarray(mm[\"faces\"]).reshape(-1, 3),\n mesh_type=mm[\"type\"]\n )\n\n # Parse the rooms of the scene\n scene = data[\"scene\"]\n # Keep track of the parsed rooms\n rooms = []\n for rr in scene[\"room\"]:\n # Keep track of the furniture in the room\n furniture_in_room = []\n # Keep track of the extra meshes in the room\n extra_meshes_in_room = []\n # Flag to keep track of invalid scenes\n is_valid_scene = True\n\n for cc in rr[\"children\"]:\n if cc[\"ref\"] in furniture_in_scene:\n tf = furniture_in_scene[cc[\"ref\"]]\n # If scale is very small/big ignore this scene\n if any(si < 1e-5 for si in cc[\"scale\"]):\n is_valid_scene = False\n break\n if any(si > 5 for si in cc[\"scale\"]):\n is_valid_scene = False\n break\n furniture_in_room.append(ThreedFutureModel(\n tf[\"model_uid\"],\n tf[\"model_jid\"],\n tf[\"model_info\"],\n cc[\"pos\"],\n cc[\"rot\"],\n cc[\"scale\"],\n path_to_models\n ))\n elif cc[\"ref\"] in meshes_in_scene:\n mf = meshes_in_scene[cc[\"ref\"]]\n extra_meshes_in_room.append(ThreedFutureExtra(\n mf[\"mesh_uid\"],\n mf[\"mesh_jid\"],\n mf[\"mesh_xyz\"],\n mf[\"mesh_faces\"],\n mf[\"mesh_type\"],\n cc[\"pos\"],\n cc[\"rot\"],\n cc[\"scale\"]\n ))\n else:\n continue\n if len(furniture_in_room) > 1 and is_valid_scene:\n # Check whether a room with the same instanceid has\n # already been added to the list of rooms\n if rr[\"instanceid\"] not in unique_room_ids:\n unique_room_ids.add(rr[\"instanceid\"])\n # Add to the list\n rooms.append(Room(\n rr[\"instanceid\"], # scene_id\n rr[\"type\"].lower(), # scene_type\n furniture_in_room, # bounding boxes\n extra_meshes_in_room, # extras e.g. walls\n m.split(\"/\")[-1].split(\".\")[0], # json_path\n path_to_room_masks_dir\n ))\n scenes.append(rooms)\n s = \"{:5d} / {:5d}\".format(i, len(path_to_scene_layouts))\n print(s, flush=True, end=\"\\b\"*len(s))\n print()\n\n scenes = sum(scenes, [])\n pickle.dump(scenes, open(\"/tmp/threed_front.pkl\", \"wb\"))\n\n return scenes"
}
] | from collections import Counter, OrderedDict
from functools import lru_cache
from PIL import Image
from .common import BaseDataset
from .threed_front_scene import Room
from .utils import parse_threed_front_scenes
import numpy as np
import json
import os | 4,465 | #
# Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
# Licensed under the NVIDIA Source Code License.
# See LICENSE at https://github.com/nv-tlabs/ATISS.
# Authors: Despoina Paschalidou, Amlan Kar, Maria Shugrina, Karsten Kreis,
# Andreas Geiger, Sanja Fidler
#
class ThreedFront(BaseDataset):
"""Container for the scenes in the 3D-FRONT dataset.
Arguments
---------
scenes: list of Room objects for all scenes in 3D-FRONT dataset
"""
def __init__(self, scenes, bounds=None):
super().__init__(scenes)
| #
# Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
# Licensed under the NVIDIA Source Code License.
# See LICENSE at https://github.com/nv-tlabs/ATISS.
# Authors: Despoina Paschalidou, Amlan Kar, Maria Shugrina, Karsten Kreis,
# Andreas Geiger, Sanja Fidler
#
class ThreedFront(BaseDataset):
"""Container for the scenes in the 3D-FRONT dataset.
Arguments
---------
scenes: list of Room objects for all scenes in 3D-FRONT dataset
"""
def __init__(self, scenes, bounds=None):
super().__init__(scenes) | assert isinstance(self.scenes[0], Room) | 1 | 2023-11-06 07:55:51+00:00 | 8k |
molML/traversing_chem_space | active_learning/screening.py | [
{
"identifier": "Ensemble",
"path": "active_learning/nn.py",
"snippet": "class Ensemble(torch.nn.Module):\n \"\"\" Ensemble of GCNs\"\"\"\n def __init__(self, ensemble_size: int = 10, seed: int = 0, architecture: str = 'mlp', **kwargs) -> None:\n self.ensemble_size = ensemble_size\n self.architecture = architecture\n self.seed = seed\n rng = np.random.default_rng(seed=seed)\n self.seeds = rng.integers(0, 1000, ensemble_size)\n self.models = {i: Model(seed=s, architecture=architecture, **kwargs) for i, s in enumerate(self.seeds)}\n\n def optimize_hyperparameters(self, x, y: DataLoader, **kwargs):\n # raise NotImplementedError\n best_hypers = optimize_hyperparameters(x, y, architecture=self.architecture, **kwargs)\n # # re-init model wrapper with optimal hyperparameters\n self.__init__(ensemble_size=self.ensemble_size, seed=self.seed, **best_hypers)\n\n def train(self, dataloader: DataLoader, **kwargs) -> None:\n for i, m in self.models.items():\n m.train(dataloader, **kwargs)\n\n def predict(self, dataloader, **kwargs) -> Tensor:\n \"\"\" logits_N_K_C = [N, num_inference_samples, num_classes] \"\"\"\n logits_N_K_C = torch.stack([m.predict(dataloader) for m in self.models.values()], 1)\n\n return logits_N_K_C\n\n def __getitem__(self, item):\n return self.models[item]\n\n def __repr__(self) -> str:\n return f\"Ensemble of {self.ensemble_size} Classifiers\""
},
{
"identifier": "MasterDataset",
"path": "active_learning/data_prep.py",
"snippet": "class MasterDataset:\n \"\"\" Dataset that holds all data in an indexable way \"\"\"\n def __init__(self, name: str, df: pd.DataFrame = None, dataset: str = 'ALDH1', representation: str = 'ecfp', root: str = 'data',\n overwrite: bool = False) -> None:\n\n assert representation in ['ecfp', 'graph'], f\"'representation' must be 'ecfp' or 'graph', not {representation}\"\n self.representation = representation\n self.pth = os.path.join(ROOT_DIR, root, dataset, name)\n\n # If not done already, process all data. Else just load it\n if not os.path.exists(self.pth) or overwrite:\n assert df is not None, \"You need to supply a dataframe with 'smiles' and 'y' values\"\n os.makedirs(os.path.join(root, dataset, name), exist_ok=True)\n self.process(df)\n self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()\n else:\n self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()\n\n def process(self, df: pd.DataFrame) -> None:\n\n print('Processing data ... ', flush=True, file=sys.stderr)\n\n index_smiles = OrderedDict({i: smi for i, smi in enumerate(df.smiles)})\n smiles_index = OrderedDict({smi: i for i, smi in enumerate(df.smiles)})\n smiles = np.array(df.smiles.tolist())\n x = smiles_to_ecfp(smiles, silent=False)\n y = torch.tensor(df.y.tolist())\n graphs = [smiles_to_graph(smi, y=y.type(torch.LongTensor)) for smi, y in tqdm(zip(smiles, y))]\n\n torch.save(index_smiles, os.path.join(self.pth, 'index_smiles'))\n torch.save(smiles_index, os.path.join(self.pth, 'smiles_index'))\n torch.save(smiles, os.path.join(self.pth, 'smiles'))\n torch.save(x, os.path.join(self.pth, 'x'))\n torch.save(y, os.path.join(self.pth, 'y'))\n torch.save(graphs, os.path.join(self.pth, 'graphs'))\n\n def load(self) -> (dict, dict, np.ndarray, np.ndarray, np.ndarray, list):\n\n print('Loading data ... ', flush=True, file=sys.stderr)\n\n index_smiles = torch.load(os.path.join(self.pth, 'index_smiles'))\n smiles_index = torch.load(os.path.join(self.pth, 'smiles_index'))\n smiles = torch.load(os.path.join(self.pth, 'smiles'))\n x = torch.load(os.path.join(self.pth, 'x'))\n y = torch.load(os.path.join(self.pth, 'y'))\n graphs = torch.load(os.path.join(self.pth, 'graphs'))\n\n return smiles_index, index_smiles, smiles, x, y, graphs\n\n def __len__(self) -> int:\n return len(self.smiles)\n\n def all(self):\n return self[range(len(self.smiles))]\n\n def __getitem__(self, idx):\n if type(idx) is int:\n idx = [idx]\n if self.representation == 'ecfp':\n return self.x[idx], self.y[idx], self.smiles[idx]\n if self.representation == 'graph':\n return [self.graphs[i] for i in idx], self.y[idx], self.smiles[idx]"
},
{
"identifier": "Handler",
"path": "active_learning/data_handler.py",
"snippet": "class Handler:\n def __init__(self, n_start: int = 64, bias: str = 'random', seed: int = 42, dataset: str = 'ALDH1') -> None:\n\n assert bias in ['random', 'small', 'large'], \"'bias' has to be either 'random', 'small', or 'large'\"\n assert n_start <= 64 or bias == 'random', 'Number of starting molecules has to be <= 64'\n\n self.index_smiles = torch.load(os.path.join(ROOT_DIR, 'data', dataset, 'screen', 'index_smiles'))\n self.smiles_index = torch.load(os.path.join(ROOT_DIR, 'data', dataset, 'screen', 'smiles_index'))\n self.all_y = torch.load(os.path.join(ROOT_DIR, 'data', dataset, 'screen', 'y'))\n\n self.dataset = dataset\n self.selected_start_cluster = None\n self.train_idx, self.screen_idx = self.get_start_data(n_start=n_start, bias=bias, seed=seed)\n self.picks = [self.train_idx]\n\n def get_start_data(self, n_start: int = 64, bias: str = 'random', seed: int = 0) -> (np.ndarray, np.ndarray):\n\n rng = np.random.default_rng(seed=seed)\n starting_clusters = torch.load(os.path.join(ROOT_DIR, f'data/{self.dataset}/screen/starting_clusters'))\n n_clusters = len(starting_clusters)\n self.selected_start_cluster = seed if seed <= n_clusters else rng.integers(0, n_clusters)\n\n if bias == 'random':\n # get a random hit to start out with\n hits_idx = np.where(self.all_y == 1)[0]\n selected_hit_idx = np.array([hits_idx[rng.integers(0, len(hits_idx))]])\n\n # get the other random molecules\n remaining_idxs = np.array([i for i in range(len(self.all_y)) if i not in selected_hit_idx])\n selected_others_idx = rng.integers(0, len(remaining_idxs), n_start - 1)\n else:\n # select a random cluster\n cluster_smiles = starting_clusters[self.selected_start_cluster][0 if bias == 'large' else 1]\n\n # get the molecule indices and labels\n cluster_smiles_idx = np.array([self.smiles_index[smi] for smi in cluster_smiles])\n cluster_smiles_labels = self.all_y[cluster_smiles_idx]\n\n # get all hits and select a random hit as a starting point\n hits_idx = cluster_smiles_idx[np.where(cluster_smiles_labels == 1)[0]]\n selected_hit_idx = np.array([hits_idx[rng.integers(0, len(hits_idx))]])\n\n # get the other random molecules from the cluster\n remaining_idxs = np.array([i for i in cluster_smiles_idx if i not in selected_hit_idx])\n selected_others_idx = remaining_idxs[rng.integers(0, len(remaining_idxs), n_start - 1)]\n\n train_idx = np.concatenate((selected_hit_idx, selected_others_idx))\n rng.shuffle(train_idx)\n\n screen_idx = np.array([i for i in range(len(self.all_y)) if i not in train_idx])\n assert len(np.intersect1d(screen_idx, train_idx)) == 0, \"Something went wrong selecting train/screen samples\"\n\n return train_idx, screen_idx\n\n def add(self, picks: Union[list, np.ndarray]):\n # Get the corresponding indices of the master dataset, and save it in self.acquired\n added_idx = np.array([self.smiles_index[smi] for smi in picks])\n self.picks.append(added_idx)\n\n self.train_idx = np.concatenate((self.train_idx, added_idx))\n self.screen_idx = np.array([i for i in range(len(self.all_y)) if i not in self.train_idx])\n\n def get_idx(self) -> (np.ndarray, np.ndarray):\n return self.train_idx, self.screen_idx\n\n def __call__(self, *args, **kwargs):\n return self.get_idx()"
},
{
"identifier": "Evaluate",
"path": "active_learning/utils.py",
"snippet": "class Evaluate:\n def __init__(self):\n self.binary_accuracy = [0]\n self.balanced_accuracy = [0]\n self.precision = [0]\n self.tpr = [0]\n self.roc_auc = [0]\n self.tn, self.fp, self.fn, self.tp = [0], [0], [0], [0]\n\n def eval(self, logits_N_K_C: torch.Tensor, y: torch.Tensor):\n\n y = y.cpu() if type(y) is torch.Tensor else torch.tensor(y)\n y_hat = torch.mean(torch.exp(logits_N_K_C), dim=1)\n y_hat = y_hat.cpu() if type(y_hat) is torch.Tensor else torch.tensor(y_hat)\n\n y_hat_bin = torch.argmax(y_hat, dim=1)\n y_hat = y_hat[:, 1]\n\n # calc_binary_accuracy\n acc = torch.sum(y_hat_bin == y) / len(y)\n self.binary_accuracy.append(acc.item())\n\n # calc_balanced_accuracy\n balanced_acc = balanced_accuracy_score(y, y_hat_bin)\n self.balanced_accuracy.append(balanced_acc)\n\n # calc roc-auc\n roc_auc = roc_auc_score(y, y_hat)\n self.roc_auc.append(roc_auc)\n\n # calc_precision\n try:\n self.precision.append(precision_score(y, y_hat_bin, zero_division=0))\n except:\n self.precision.append(0)\n\n # calc recall\n try:\n self.tpr.append(recall_score(y, y_hat_bin))\n except:\n self.tpr.append(0)\n\n # calc confusion\n tn, fp, fn, tp = confusion_matrix(y, y_hat_bin).ravel()\n self.tn.append(tn)\n self.fp.append(fp)\n self.fn.append(fn)\n self.tp.append(tp)\n\n def __repr__(self):\n return f\"Binary accuracy: {self.binary_accuracy[-1]:.4f}\\n\" \\\n f\"Balanced accuracy: {self.balanced_accuracy[-1]:.4f}\\n\" \\\n f\"ROC AUC: {self.roc_auc[-1]:.4f}\\n\" \\\n f\"Precision: {self.precision[-1]:.4f}\\n\" \\\n f\"True positive rate: {self.tpr[-1]:.4f}\\n\" \\\n f\"Hits: {self.tp[-1]}\\n\" \\\n f\"Misses: {self.fn[-1]}\\n\" \\\n f\"False positives: {self.fp[-1]}\\n\" \\\n f\"True negatives: {self.tn[-1]}\\n\"\n\n def to_dataframe(self, colnames: str = ''):\n df = pd.DataFrame({'cycle': list(range(len(self.tp))), 'binary_accuracy': self.binary_accuracy,\n 'balanced_accuracy': self.balanced_accuracy, 'roc_auc': self.roc_auc,\n 'precision': self.precision, 'tpr': self.tpr,\n 'tp': self.tp, 'fn': self.fn, 'fp': self.fp, 'tn': self.tn})\n df.columns = [f\"{colnames}{i}\" for i in df.columns]\n\n return df"
},
{
"identifier": "to_torch_dataloader",
"path": "active_learning/utils.py",
"snippet": "def to_torch_dataloader(x: Union[list, np.ndarray], y: Optional[np.ndarray] = None, **kwargs) -> \\\n Union[DataLoader, pyg_DataLoader]:\n\n if type(x) is np.ndarray:\n assert y is not None, 'No y values provided'\n return DataLoader(TensorDataset(Tensor(x), Tensor(y).unsqueeze(1).type(torch.LongTensor)), **kwargs)\n else:\n return pyg_DataLoader(x, **kwargs)"
},
{
"identifier": "Acquisition",
"path": "active_learning/acquisition.py",
"snippet": "class Acquisition:\n def __init__(self, method: str, seed: int = 42, **kwargs):\n\n self.acquisition_method = {'random': self.random_pick,\n 'exploration': greedy_exploration,\n 'exploitation': greedy_exploitation,\n 'dynamic': dynamic_exploration,\n 'dynamicbald': dynamic_exploration_bald,\n 'bald': bald,\n 'batch_bald': batch_bald,\n 'similarity': similarity_search}\n\n assert method in self.acquisition_method.keys(), f\"Specified 'method' not available. \" \\\n f\"Select from: {self.acquisition_method.keys()}\"\n\n self.method = method\n self.params = kwargs\n self.rng = np.random.default_rng(seed=seed)\n self.iteration = 0\n\n def acquire(self, logits_N_K_C: Tensor, smiles: np.ndarray[str], hits: np.ndarray[str], n: int = 1) -> \\\n np.ndarray[str]:\n\n self.iteration += 1\n\n return self.acquisition_method[self.method](logits_N_K_C=logits_N_K_C, smiles=smiles, n=n, hits=hits,\n iteration=self.iteration, **self.params)\n\n def __call__(self, *args, **kwargs) -> np.ndarray[str]:\n return self.acquire(*args, **kwargs)\n\n def random_pick(self, smiles: np.ndarray[str], n: int = 1, return_smiles: bool = True, **kwargs) -> np.ndarray:\n \"\"\" select n random samples \"\"\"\n picks_idx = self.rng.integers(0, len(smiles), n)\n\n return smiles[picks_idx] if return_smiles else picks_idx"
},
{
"identifier": "logits_to_pred",
"path": "active_learning/acquisition.py",
"snippet": "def logits_to_pred(logits_N_K_C: Tensor, return_prob: bool = True, return_uncertainty: bool = True) -> (Tensor, Tensor):\n \"\"\" Get the probabilities/class vector and sample uncertainty from the logits \"\"\"\n\n mean_probs_N_C = torch.mean(torch.exp(logits_N_K_C), dim=1)\n uncertainty = mean_sample_entropy(logits_N_K_C)\n\n if return_prob:\n y_hat = mean_probs_N_C\n else:\n y_hat = torch.argmax(mean_probs_N_C, dim=1)\n\n if return_uncertainty:\n return y_hat, uncertainty\n else:\n return y_hat"
}
] | import pandas as pd
import numpy as np
import torch
from active_learning.nn import Ensemble
from active_learning.data_prep import MasterDataset
from active_learning.data_handler import Handler
from active_learning.utils import Evaluate, to_torch_dataloader
from active_learning.acquisition import Acquisition, logits_to_pred
from tqdm.auto import tqdm
from torch.utils.data import WeightedRandomSampler
from math import ceil | 4,060 | """
This script contains the main active learning loop that runs all experiments.
Author: Derek van Tilborg, Eindhoven University of Technology, May 2023
"""
INFERENCE_BATCH_SIZE = 512
TRAINING_BATCH_SIZE = 64
NUM_WORKERS = 4
def active_learning(n_start: int = 64, acquisition_method: str = 'exploration', max_screen_size: int = None,
batch_size: int = 16, architecture: str = 'gcn', seed: int = 0, bias: str = 'random',
optimize_hyperparameters: bool = False, ensemble_size: int = 10, retrain: bool = True,
anchored: bool = True, dataset: str = 'ALDH1') -> pd.DataFrame:
"""
:param n_start: number of molecules to start out with
:param acquisition_method: acquisition method, as defined in active_learning.acquisition
:param max_screen_size: we stop when this number of molecules has been screened
:param batch_size: number of molecules to add every cycle
:param architecture: 'gcn' or 'mlp'
:param seed: int 1-20
:param bias: 'random', 'small', 'large'
    :param optimize_hyperparameters: whether to optimize model hyperparameters (bool)
:param ensemble_size: number of models in the ensemble, default is 10
:return: dataframe with results
"""
# Load the datasets
representation = 'ecfp' if architecture == 'mlp' else 'graph'
| """
This script contains the main active learning loop that runs all experiments.
Author: Derek van Tilborg, Eindhoven University of Technology, May 2023
"""
INFERENCE_BATCH_SIZE = 512
TRAINING_BATCH_SIZE = 64
NUM_WORKERS = 4
def active_learning(n_start: int = 64, acquisition_method: str = 'exploration', max_screen_size: int = None,
batch_size: int = 16, architecture: str = 'gcn', seed: int = 0, bias: str = 'random',
optimize_hyperparameters: bool = False, ensemble_size: int = 10, retrain: bool = True,
anchored: bool = True, dataset: str = 'ALDH1') -> pd.DataFrame:
"""
:param n_start: number of molecules to start out with
:param acquisition_method: acquisition method, as defined in active_learning.acquisition
:param max_screen_size: we stop when this number of molecules has been screened
:param batch_size: number of molecules to add every cycle
:param architecture: 'gcn' or 'mlp'
:param seed: int 1-20
:param bias: 'random', 'small', 'large'
    :param optimize_hyperparameters: whether to optimize model hyperparameters (bool)
:param ensemble_size: number of models in the ensemble, default is 10
:return: dataframe with results
"""
# Load the datasets
representation = 'ecfp' if architecture == 'mlp' else 'graph' | ds_screen = MasterDataset('screen', representation=representation, dataset=dataset) | 1 | 2023-11-10 08:53:40+00:00 | 8k |
yunik1004/SAiD | script/train.py | [
{
"identifier": "SAID",
"path": "said/model/diffusion.py",
"snippet": "class SAID(ABC, nn.Module):\n \"\"\"Abstract class of SAiD models\"\"\"\n\n denoiser: nn.Module\n\n def __init__(\n self,\n audio_config: Optional[Wav2Vec2Config] = None,\n audio_processor: Optional[Wav2Vec2Processor] = None,\n noise_scheduler: Type[SchedulerMixin] = DDIMScheduler,\n in_channels: int = 32,\n feature_dim: int = -1,\n diffusion_steps: int = 1000,\n latent_scale: float = 1,\n prediction_type: str = \"epsilon\",\n ):\n \"\"\"Constructor of SAID_UNet1D\n\n Parameters\n ----------\n audio_config : Optional[Wav2Vec2Config], optional\n Wav2Vec2Config object, by default None\n audio_processor : Optional[Wav2Vec2Processor], optional\n Wav2Vec2Processor object, by default None\n noise_scheduler: Type[SchedulerMixin]\n Noise scheduler, by default DDIMScheduler\n in_channels : int\n Dimension of the input, by default 32\n feature_dim : int\n Dimension of the latent feature, by default -1\n diffusion_steps : int\n The number of diffusion steps, by default 1000\n latent_scale : float\n Scaling the latent, by default 1\n prediction_type: str\n Prediction type of the scheduler function, \"epsilon\", \"sample\", or \"v_prediction\", by default \"epsilon\"\n \"\"\"\n super().__init__()\n\n # Audio-related\n self.audio_config = (\n audio_config if audio_config is not None else Wav2Vec2Config()\n )\n self.audio_encoder = ModifiedWav2Vec2Model(self.audio_config)\n self.audio_processor = (\n audio_processor\n if audio_processor is not None\n else Wav2Vec2Processor.from_pretrained(\"facebook/wav2vec2-base-960h\")\n )\n self.sampling_rate = self.audio_processor.feature_extractor.sampling_rate\n\n self.latent_scale = latent_scale\n\n # Noise scheduler\n self.noise_scheduler = noise_scheduler(\n num_train_timesteps=diffusion_steps,\n beta_schedule=\"squaredcos_cap_v2\",\n prediction_type=prediction_type,\n )\n\n # Feature embedding\n self.feature_dim = feature_dim\n if self.feature_dim > 0:\n self.audio_proj_layer = nn.Linear(\n self.audio_config.output_hidden_size, self.feature_dim\n )\n self.null_cond_emb = nn.Parameter(torch.randn(1, 1, self.feature_dim))\n else:\n self.null_cond_emb = nn.Parameter(\n torch.randn(1, 1, self.audio_config.output_hidden_size)\n )\n\n \"\"\"\n # Relieve the clipping\n self.noise_scheduler.betas = betas_for_alpha_bar(diffusion_steps, 1 - 1e-15)\n self.noise_scheduler.alphas = 1.0 - self.noise_scheduler.betas\n self.noise_scheduler.alphas_cumprod = torch.cumprod(\n self.noise_scheduler.alphas, dim=0\n )\n \"\"\"\n\n def forward(\n self,\n noisy_samples: torch.FloatTensor,\n timesteps: torch.LongTensor,\n audio_embedding: torch.FloatTensor,\n ) -> torch.FloatTensor:\n \"\"\"Return the predicted noise in the noisy samples\n\n Parameters\n ----------\n noisy_samples : torch.FloatTensor\n (Batch_size, coeffs_seq_len, in_channels), Sequence of noisy coefficients\n timesteps : torch.LongTensor\n (Batch_size,) or (1,), Timesteps\n audio_embedding : torch.FloatTensor\n (Batch_size, embedding_seq_len, embedding_size), Sequence of audio embeddings\n\n Returns\n -------\n torch.FloatTensor\n (Batch_size, coeffs_seq_len, num_coeffs), Sequence of predicted noises\n \"\"\"\n timestep_size = timesteps.size()\n if len(timestep_size) == 0 or timestep_size[0] == 1:\n batch_size = noisy_samples.shape[0]\n timesteps = timesteps.repeat(batch_size)\n\n noise_pred = self.denoiser(noisy_samples, timesteps, audio_embedding)\n return noise_pred\n\n def pred_original_sample(\n self,\n noisy_samples: torch.FloatTensor,\n noise: torch.FloatTensor,\n timesteps: torch.LongTensor,\n ) -> 
torch.FloatTensor:\n \"\"\"Predict the denoised sample (x_{0}) based on the noisy samples and the noise\n\n Parameters\n ----------\n noisy_samples : torch.FloatTensor\n (Batch_size, coeffs_seq_len, in_channels), Noisy sample\n noise : torch.FloatTensor\n (Batch_size, coeffs_seq_len, in_channels), Noise\n timesteps : torch.LongTensor\n (Batch_size,), Current timestep\n\n Returns\n -------\n torch.FloatTensor\n Predicted denoised sample (x_{0})\n \"\"\"\n alpha_prod_t = self.noise_scheduler.alphas_cumprod[timesteps].view(-1, 1, 1)\n beta_prod_t = 1 - alpha_prod_t\n\n pred_original_sample = (\n noisy_samples - beta_prod_t**0.5 * noise\n ) / alpha_prod_t**0.5\n\n return pred_original_sample\n\n def process_audio(\n self, waveform: Union[np.ndarray, torch.Tensor, List[np.ndarray]]\n ) -> torch.FloatTensor:\n \"\"\"Process the waveform to fit the audio encoder\n\n Parameters\n ----------\n waveform : Union[np.ndarray, torch.Tensor, List[np.ndarray]]\n - np.ndarray, torch.Tensor: (audio_seq_len,)\n - List[np.ndarray]: each (audio_seq_len,)\n\n Returns\n -------\n torch.FloatTensor\n (Batch_size, T_a), Processed mono waveform\n \"\"\"\n out = self.audio_processor(\n waveform, sampling_rate=self.sampling_rate, return_tensors=\"pt\"\n )[\"input_values\"]\n return out\n\n def get_audio_embedding(\n self, waveform: torch.FloatTensor, num_frames: Optional[int]\n ) -> torch.FloatTensor:\n \"\"\"Return the audio embedding of the waveform\n\n Parameters\n ----------\n waveform : torch.FloatTensor\n (Batch_size, T_a), Processed mono waveform\n num_frames: Optional[int]\n The length of output audio embedding sequence, by default None\n\n Returns\n -------\n torch.FloatTensor\n (Batch_size, embed_seq_len, embed_size), Generated audio embedding.\n If num_frames is not None, embed_seq_len = num_frames.\n \"\"\"\n features = self.audio_encoder(waveform, num_frames=num_frames).last_hidden_state\n if self.feature_dim > 0:\n features = self.audio_proj_layer(features)\n return features\n\n def get_random_timesteps(self, batch_size: int) -> torch.LongTensor:\n \"\"\"Return the random timesteps\n\n Parameters\n ----------\n batch_size : int\n Size of the batch\n\n Returns\n -------\n torch.LongTensor\n (batch_size,), random timesteps\n \"\"\"\n timesteps = torch.randint(\n 0,\n self.noise_scheduler.config.num_train_timesteps,\n (batch_size,),\n dtype=torch.long,\n )\n return timesteps\n\n def add_noise(\n self, sample: torch.FloatTensor, timestep: torch.LongTensor\n ) -> SAIDNoiseAdditionOutput:\n \"\"\"Add the noise into the sample\n\n Parameters\n ----------\n sample : torch.FloatTensor\n Sample to be noised\n timestep : torch.LongTensor\n (num_timesteps,), Timestep of the noise scheduler\n\n Returns\n -------\n SAIDNoiseAdditionOutput\n Noisy sample and the added noise\n \"\"\"\n noise = torch.randn(sample.shape, device=sample.device)\n noisy_sample = self.noise_scheduler.add_noise(sample, noise, timestep)\n velocity = self.noise_scheduler.get_velocity(sample, noise, timestep)\n\n return SAIDNoiseAdditionOutput(\n noisy_sample=noisy_sample, noise=noise, velocity=velocity\n )\n\n def encode_samples(self, samples: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"Encode samples into latent\n\n Parameters\n ----------\n samples : torch.FloatTensor\n (Batch_size, sample_seq_len, in_channels), Samples\n\n Returns\n -------\n torch.FloatTensor\n (Batch_size, sample_seq_len, in_channels), Output latent\n \"\"\"\n return samples.clone()\n\n def decode_latent(self, latent: torch.FloatTensor) -> torch.FloatTensor:\n 
\"\"\"Decode latent into samples\n\n Parameters\n ----------\n latent : torch.FloatTensor\n (Batch_size, sample_seq_len, in_channels), Latent\n\n Returns\n -------\n torch.FloatTensor\n (Batch_size, sample_seq_len, in_channels), Output samples\n \"\"\"\n return latent.clone()\n\n def inference(\n self,\n waveform_processed: torch.FloatTensor,\n init_samples: Optional[torch.FloatTensor] = None,\n mask: Optional[torch.FloatTensor] = None,\n num_inference_steps: int = 100,\n strength: float = 1.0,\n guidance_scale: float = 2.5,\n guidance_rescale: float = 0.0,\n eta: float = 0.0,\n fps: int = 60,\n save_intermediate: bool = False,\n show_process: bool = False,\n ) -> SAIDInferenceOutput:\n \"\"\"Inference pipeline\n\n Parameters\n ----------\n waveform_processed : torch.FloatTensor\n (Batch_size, T_a), Processed mono waveform\n init_samples : Optional[torch.FloatTensor], optional\n (Batch_size, sample_seq_len, x_dim), Starting point for the process, by default None\n mask : Optional[torch.FloatTensor], optional\n (Batch_size, sample_seq_len, x_dim), Mask the region not to be changed, by default None\n num_inference_steps : int, optional\n The number of denoising steps, by default 100\n strength: float, optional\n How much to paint. Must be between 0 and 1, by default 1.0\n guidance_scale : float, optional\n Guidance scale in classifier-free guidance, by default 2.5\n guidance_rescale : float, optional\n Guidance rescale to control rescale strength, by default 0.0\n eta : float, optional\n Eta in DDIM, by default 0.0\n fps : int, optional\n The number of frames per second, by default 60\n save_intermediate: bool, optional\n Return the intermediate results, by default False\n show_process: bool, optional\n Visualize the inference process, by default False\n\n Returns\n -------\n SAIDInferenceOutput\n Inference results and the intermediates\n \"\"\"\n batch_size = waveform_processed.shape[0]\n waveform_len = waveform_processed.shape[1]\n in_channels = self.denoiser.in_channels\n device = waveform_processed.device\n do_classifier_free_guidance = guidance_scale > 1.0\n window_size = int(waveform_len / self.sampling_rate * fps)\n\n self.noise_scheduler.set_timesteps(num_inference_steps, device=device)\n\n latents = (\n torch.randn(batch_size, window_size, in_channels, device=device)\n if init_samples is None\n else self.encode_samples(init_samples)\n )\n\n # Scaling the latent\n latents *= self.latent_scale * self.noise_scheduler.init_noise_sigma\n\n init_latents = latents.clone()\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n\n # Add additional noise\n noise = None\n if init_samples is not None:\n timestep = self.noise_scheduler.timesteps[-init_timestep]\n timesteps = torch.tensor(\n [timestep] * batch_size, dtype=torch.long, device=device\n )\n\n noise_output = self.add_noise(latents, timesteps)\n latents = noise_output.noisy_sample\n noise = noise_output.noise\n\n audio_embedding = self.get_audio_embedding(waveform_processed, window_size)\n if do_classifier_free_guidance:\n \"\"\"\n uncond_waveform = [np.zeros((waveform_len)) for _ in range(batch_size)]\n uncond_waveform_processed = self.process_audio(uncond_waveform).to(device)\n uncond_audio_embedding = self.get_audio_embedding(\n uncond_waveform_processed, window_size\n )\n \"\"\"\n # uncond_audio_embedding = torch.zeros_like(audio_embedding)\n uncond_audio_embedding = self.null_cond_emb.repeat(\n batch_size, audio_embedding.shape[1], 1\n )\n audio_embedding = torch.cat([uncond_audio_embedding, 
audio_embedding])\n\n # Prepare extra kwargs for the scheduler step\n extra_step_kwargs = {}\n if \"eta\" in set(inspect.signature(self.noise_scheduler.step).parameters.keys()):\n extra_step_kwargs[\"eta\"] = eta\n\n intermediates = []\n\n t_start = num_inference_steps - init_timestep\n\n for idx, t in enumerate(\n tqdm(\n self.noise_scheduler.timesteps[t_start:],\n disable=not show_process,\n )\n ):\n if save_intermediate:\n interm = self.decode_latent(latents / self.latent_scale)\n intermediates.append(interm)\n\n latent_model_input = (\n torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n )\n latent_model_input = self.noise_scheduler.scale_model_input(\n latent_model_input, t\n )\n\n noise_pred = self.forward(latent_model_input, t, audio_embedding)\n\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_audio = noise_pred.chunk(2)\n noise_pred = noise_pred_audio + guidance_scale * (\n noise_pred_audio - noise_pred_uncond\n )\n\n if guidance_rescale > 0.0:\n noise_pred = rescale_noise_cfg(\n noise_pred, noise_pred_audio, guidance_rescale\n )\n\n latents = self.noise_scheduler.step(\n noise_pred, t, latents, **extra_step_kwargs\n ).prev_sample\n\n # Masking\n if init_samples is not None and mask is not None:\n init_latents_noisy = init_latents\n\n tdx_next = t_start + idx + 1\n if tdx_next < num_inference_steps:\n t_next = self.noise_scheduler.timesteps[tdx_next]\n init_latents_noisy = self.noise_scheduler.add_noise(\n init_latents, noise, t_next\n )\n\n latents = init_latents_noisy * mask + latents * (1 - mask)\n\n # Start clipping after 90% done\n \"\"\"\n if idx / init_timestep > 0.9:\n latents = (\n self.encode_samples(\n self.decode_latent(latents / self.latent_scale).clamp(0, 1)\n )\n * self.latent_scale\n )\n \"\"\"\n\n # Re-scaling & clipping the latent\n result = self.decode_latent(latents / self.latent_scale).clamp(0, 1)\n\n return SAIDInferenceOutput(result=result, intermediates=intermediates)"
},
{
"identifier": "SAID_UNet1D",
"path": "said/model/diffusion.py",
"snippet": "class SAID_UNet1D(SAID):\n \"\"\"SAiD model implemented using U-Net 1D model\"\"\"\n\n def __init__(\n self,\n audio_config: Optional[Wav2Vec2Config] = None,\n audio_processor: Optional[Wav2Vec2Processor] = None,\n noise_scheduler: Type[SchedulerMixin] = DDIMScheduler,\n in_channels: int = 32,\n feature_dim: int = -1,\n diffusion_steps: int = 1000,\n latent_scale: float = 1,\n prediction_type: str = \"epsilon\",\n ):\n \"\"\"Constructor of SAID_UNet1D\n\n Parameters\n ----------\n audio_config : Optional[Wav2Vec2Config], optional\n Wav2Vec2Config object, by default None\n audio_processor : Optional[Wav2Vec2Processor], optional\n Wav2Vec2Processor object, by default None\n noise_scheduler: Type[SchedulerMixin]\n Noise scheduler, by default DDIMScheduler\n in_channels : int\n Dimension of the input, by default 32\n feature_dim : int\n Dimension of the latent feature, by default -1\n diffusion_steps : int\n The number of diffusion steps, by default 1000\n latent_scale : float\n Scaling the latent, by default 1\n prediction_type: str\n Prediction type of the scheduler function, \"epsilon\", \"sample\", or \"v_prediction\", by default \"epsilon\"\n \"\"\"\n super().__init__(\n audio_config=audio_config,\n audio_processor=audio_processor,\n in_channels=in_channels,\n feature_dim=feature_dim,\n diffusion_steps=diffusion_steps,\n latent_scale=latent_scale,\n prediction_type=prediction_type,\n )\n\n # Denoiser\n self.denoiser = UNet1DConditionModel(\n in_channels=in_channels,\n out_channels=in_channels,\n cross_attention_dim=self.feature_dim\n if self.feature_dim > 0\n else self.audio_config.hidden_size,\n )"
},
{
"identifier": "ModifiedWav2Vec2Model",
"path": "said/model/wav2vec2.py",
"snippet": "class ModifiedWav2Vec2Model(Wav2Vec2Model):\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n num_frames: Optional[int] = None,\n ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n extract_features = self.feature_extractor(input_values)\n\n # Change the number of frames\n if num_frames is not None:\n extract_features = F.interpolate(\n extract_features, size=num_frames, align_corners=True, mode=\"linear\"\n )\n\n extract_features = extract_features.transpose(1, 2)\n\n if attention_mask is not None:\n # compute reduced attention_mask corresponding to feature vectors\n attention_mask = self._get_feature_vector_attention_mask(\n extract_features.shape[1], attention_mask, add_adapter=False\n )\n\n hidden_states, extract_features = self.feature_projection(extract_features)\n hidden_states = self._mask_hidden_states(\n hidden_states,\n mask_time_indices=mask_time_indices,\n attention_mask=attention_mask,\n )\n\n encoder_outputs = self.encoder(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.adapter is not None:\n hidden_states = self.adapter(hidden_states)\n\n if not return_dict:\n return (hidden_states, extract_features) + encoder_outputs[1:]\n\n return Wav2Vec2BaseModelOutput(\n last_hidden_state=hidden_states,\n extract_features=extract_features,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )"
},
{
"identifier": "load_blendshape_coeffs",
"path": "said/util/blendshape.py",
"snippet": "def load_blendshape_coeffs(coeffs_path: str) -> torch.FloatTensor:\n \"\"\"Load the blendshape coefficients file\n\n Parameters\n ----------\n coeffs_path : str\n Path of the blendshape coefficients file (csv format)\n\n Returns\n -------\n torch.FloatTensor\n (T_b, num_classes), Blendshape coefficients\n \"\"\"\n df = pd.read_csv(coeffs_path)\n coeffs = torch.FloatTensor(df.values)\n return coeffs"
}
] | import argparse
import os
import pathlib
import torch
from dataclasses import dataclass
from typing import Optional
from accelerate import Accelerator
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from torch import nn
from torch.utils.data import DataLoader, RandomSampler
from tqdm import tqdm
from said.model.diffusion import SAID, SAID_UNet1D
from said.model.wav2vec2 import ModifiedWav2Vec2Model
from said.util.blendshape import load_blendshape_coeffs
from dataset.dataset_voca import DataBatch, BlendVOCATrainDataset, BlendVOCAValDataset | 7,098 | help="Directory of the outputs",
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
help="Prediction type of the scheduler function, 'epsilon', 'sample', or 'v_prediction'",
)
parser.add_argument(
"--window_size_min",
type=int,
default=120,
help="Minimum window size of the blendshape coefficients sequence at training",
)
parser.add_argument(
"--batch_size", type=int, default=8, help="Batch size at training"
)
parser.add_argument(
"--epochs", type=int, default=100000, help="The number of epochs"
)
parser.add_argument(
"--num_warmup_epochs",
type=int,
default=5000,
help="The number of warmup epochs",
)
parser.add_argument(
"--num_workers", type=int, default=0, help="The number of workers"
)
parser.add_argument(
"--learning_rate", type=float, default=1e-5, help="Learning rate"
)
parser.add_argument(
"--uncond_prob",
type=float,
default=0.1,
help="Unconditional probability of waveform (for classifier-free guidance)",
)
parser.add_argument(
"--unet_feature_dim",
type=int,
default=-1,
help="Dimension of the latent feature of the UNet",
)
parser.add_argument(
"--weight_vel",
type=float,
default=1.0,
help="Weight for the velocity loss",
)
parser.add_argument(
"--weight_vertex",
type=float,
default=0.02,
help="Weight for the vertex loss",
)
parser.add_argument(
"--ema",
type=bool,
default=True,
help="Use Exponential Moving Average of models weights",
)
parser.add_argument(
"--ema_decay",
type=float,
default=0.9999,
help="Ema decay rate",
)
parser.add_argument(
"--val_period", type=int, default=200, help="Period of validating model"
)
parser.add_argument(
"--val_repeat", type=int, default=50, help="Number of repetition of val dataset"
)
parser.add_argument(
"--save_period", type=int, default=200, help="Period of saving model"
)
args = parser.parse_args()
audio_dir = args.audio_dir
coeffs_dir = args.coeffs_dir
coeffs_std_path = args.coeffs_std_path
blendshape_deltas_path = args.blendshape_residuals_path
if blendshape_deltas_path == "":
blendshape_deltas_path = None
landmarks_path = args.landmarks_path
if landmarks_path == "":
landmarks_path = None
coeffs_std = (
None if coeffs_std_path == "" else load_blendshape_coeffs(coeffs_std_path)
)
output_dir = args.output_dir
prediction_type = args.prediction_type
window_size_min = args.window_size_min
batch_size = args.batch_size
epochs = args.epochs
num_warmup_epochs = args.num_warmup_epochs
num_workers = args.num_workers
learning_rate = args.learning_rate
uncond_prob = args.uncond_prob
unet_feature_dim = args.unet_feature_dim
weight_vel = args.weight_vel
weight_vertex = args.weight_vertex
ema = args.ema
ema_decay = args.ema_decay
val_period = args.val_period
val_repeat = args.val_repeat
save_period = args.save_period
# Initialize accelerator
accelerator = Accelerator(log_with="tensorboard", project_dir=output_dir)
if accelerator.is_main_process:
accelerator.init_trackers("SAiD")
said_model = SAID_UNet1D(
feature_dim=unet_feature_dim,
prediction_type=prediction_type,
)
| """Train the SAID_UNet1D model
"""
@dataclass
class LossStepOutput:
"""
Dataclass for the losses at each step
"""
predict: torch.FloatTensor # MAE loss for the predicted output
velocity: torch.FloatTensor # MAE loss for the velocity
vertex: Optional[torch.FloatTensor] # MAE loss for the reconstructed vertex
@dataclass
class LossEpochOutput:
"""
Dataclass for the averaged losses at each epoch
"""
total: float = 0 # Averaged total loss
predict: float = 0 # Averaged prediction loss
velocity: float = 0 # Averaged velocity loss
vertex: float = 0 # Averaged vertex loss
lr: Optional[float] = None # Last learning rate
def random_noise_loss(
said_model: SAID,
data: DataBatch,
std: Optional[torch.FloatTensor],
device: torch.device,
prediction_type: str = "epsilon",
) -> LossStepOutput:
"""Compute the loss with randomized noises
Parameters
----------
said_model : SAID
SAiD model object
data : DataBatch
Output of the BlendVOCADataset.collate_fn
std : Optional[torch.FloatTensor]
(1, x_dim), Standard deviation of coefficients
device : torch.device
GPU device
prediction_type: str
Prediction type of the scheduler function, "epsilon", "sample", or "v_prediction", by default "epsilon"
Returns
-------
LossStepOutput
Computed losses
"""
waveform = data.waveform
blendshape_coeffs = data.blendshape_coeffs.to(device)
cond = data.cond.to(device)
coeff_latents = said_model.encode_samples(
blendshape_coeffs * said_model.latent_scale
)
curr_batch_size = len(waveform)
window_size = blendshape_coeffs.shape[1]
waveform_processed = said_model.process_audio(waveform).to(device)
random_timesteps = said_model.get_random_timesteps(curr_batch_size).to(device)
cond_embedding = said_model.get_audio_embedding(waveform_processed, window_size)
uncond_embedding = said_model.null_cond_emb.repeat(
curr_batch_size, cond_embedding.shape[1], 1
)
cond_mask = cond.view(-1, 1, 1)
audio_embedding = cond_embedding * cond_mask + uncond_embedding * torch.logical_not(
cond_mask
)
noise_dict = said_model.add_noise(coeff_latents, random_timesteps)
noisy_latents = noise_dict.noisy_sample
noise = noise_dict.noise
velocity = noise_dict.velocity
pred = said_model(noisy_latents, random_timesteps, audio_embedding)
# Set answer corresponding to prediction_type
answer = None
if prediction_type == "epsilon":
answer = noise
elif prediction_type == "sample":
answer = coeff_latents
elif prediction_type == "v_prediction":
answer = velocity
criterion_pred = nn.L1Loss()
criterion_velocity = nn.L1Loss()
criterion_vertex = nn.L1Loss()
answer_reweight = answer
pred_reweight = pred
if std is not None:
answer_reweight /= std.view(1, 1, -1)
pred_reweight /= std.view(1, 1, -1)
loss_pred = criterion_pred(pred_reweight, answer_reweight)
answer_diff = answer_reweight[:, 1:, :] - answer_reweight[:, :-1, :]
pred_diff = pred_reweight[:, 1:, :] - pred_reweight[:, :-1, :]
loss_vel = criterion_velocity(pred_diff, answer_diff)
loss_vertex = None
if data.blendshape_delta is not None:
blendshape_delta = data.blendshape_delta.to(device)
b, k, v, i = blendshape_delta.shape
_, t, _ = answer.shape
blendshape_delta_norm = torch.norm(blendshape_delta, p=1, dim=[1, 2, 3]) / (
k * v * i
)
blendshape_delta_normalized = torch.div(
blendshape_delta,
blendshape_delta_norm.view(-1, 1, 1, 1),
)
be_answer = torch.bmm(answer, blendshape_delta_normalized.view(b, k, v * i))
be_pred = torch.bmm(pred, blendshape_delta_normalized.view(b, k, v * i))
# be_answer = torch.einsum("bkvi,btk->btvi", blendshape_delta_normalized, answer)
# be_pred = torch.einsum("bkvi,btk->btvi", blendshape_delta_normalized, pred)
loss_vertex = criterion_vertex(be_pred, be_answer)
return LossStepOutput(
predict=loss_pred,
velocity=loss_vel,
vertex=loss_vertex,
)
def train_epoch(
said_model: SAID,
train_dataloader: DataLoader,
optimizer: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler,
accelerator: Accelerator,
std: Optional[torch.FloatTensor],
weight_vel: float,
weight_vertex: float,
prediction_type: str = "epsilon",
ema_model: Optional[EMAModel] = None,
) -> LossEpochOutput:
"""Train the SAiD model one epoch.
Parameters
----------
said_model : SAID
SAiD model object
train_dataloader : DataLoader
Dataloader of the BlendVOCATrainDataset
optimizer : torch.optim.Optimizer
Optimizer object
lr_scheduler: torch.optim.lr_scheduler
Learning rate scheduler object
accelerator : Accelerator
Accelerator object
std : Optional[torch.FloatTensor]
(1, x_dim), Standard deviation of coefficients
weight_vel: float
Weight for the velocity loss
weight_vertex: float
Weight for the vertex loss
prediction_type: str
Prediction type of the scheduler function, "epsilon", "sample", or "v_prediction", by default "epsilon"
ema_model: Optional[EMAModel]
EMA model of said_model, by default None
Returns
-------
LossEpochOutput
Average losses
"""
device = accelerator.device
if std is not None:
std = std.to(device)
said_model.train()
train_total_losses = {
"loss": 0,
"loss_predict": 0,
"loss_velocity": 0,
"loss_vertex": 0,
}
train_total_num = 0
for data in train_dataloader:
curr_batch_size = len(data.waveform)
with accelerator.accumulate(said_model):
losses = random_noise_loss(said_model, data, std, device, prediction_type)
loss = losses.predict + weight_vel * losses.velocity
if losses.vertex is not None:
loss += weight_vertex * losses.vertex
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(said_model.parameters(), 1.0)
optimizer.step()
if ema_model:
ema_model.step(said_model.parameters())
lr_scheduler.step()
optimizer.zero_grad()
train_total_losses["loss"] += loss.item() * curr_batch_size
train_total_losses["loss_predict"] += losses.predict.item() * curr_batch_size
train_total_losses["loss_velocity"] += losses.velocity.item() * curr_batch_size
if losses.vertex is not None:
train_total_losses["loss_vertex"] += losses.vertex.item() * curr_batch_size
train_total_num += curr_batch_size
train_avg_losses = LossEpochOutput(
total=train_total_losses["loss"] / train_total_num,
predict=train_total_losses["loss_predict"] / train_total_num,
velocity=train_total_losses["loss_velocity"] / train_total_num,
vertex=train_total_losses["loss_vertex"] / train_total_num,
lr=lr_scheduler.get_last_lr()[0],
)
return train_avg_losses
def validate_epoch(
said_model: SAID,
val_dataloader: DataLoader,
accelerator: Accelerator,
std: torch.FloatTensor,
weight_vel: float,
weight_vertex: float,
prediction_type: str = "epsilon",
num_repeat: int = 1,
) -> LossEpochOutput:
"""Validate the SAiD model one epoch.
Parameters
----------
said_model : SAID
SAiD model object
val_dataloader : DataLoader
Dataloader of the BlendVOCAValDataset
accelerator : Accelerator
Accelerator object
std : torch.FloatTensor
(1, x_dim), Standard deviation of coefficients
weight_vel: float
Weight for the velocity loss
weight_vertex: float
Weight for the vertex loss
prediction_type: str
Prediction type of the scheduler function, "epsilon", "sample", or "v_prediction", by default "epsilon"
num_repeat : int, optional
Number of the repetition, by default 1
Returns
-------
LossEpochOutput
Average losses
"""
device = accelerator.device
if std is not None:
std = std.to(device)
said_model.eval()
val_total_losses = {
"loss": 0,
"loss_predict": 0,
"loss_velocity": 0,
"loss_vertex": 0,
}
val_total_num = 0
with torch.no_grad():
for _ in range(num_repeat):
for data in val_dataloader:
curr_batch_size = len(data.waveform)
losses = random_noise_loss(
said_model, data, std, device, prediction_type
)
loss = losses.predict + weight_vel * losses.velocity
if losses.vertex is not None:
loss += weight_vertex * losses.vertex
val_total_losses["loss"] += loss.item() * curr_batch_size
val_total_losses["loss_predict"] += (
losses.predict.item() * curr_batch_size
)
val_total_losses["loss_velocity"] += (
losses.velocity.item() * curr_batch_size
)
if losses.vertex is not None:
val_total_losses["loss_vertex"] += (
losses.vertex.item() * curr_batch_size
)
val_total_num += curr_batch_size
val_avg_losses = LossEpochOutput(
total=val_total_losses["loss"] / val_total_num,
predict=val_total_losses["loss_predict"] / val_total_num,
velocity=val_total_losses["loss_velocity"] / val_total_num,
vertex=val_total_losses["loss_vertex"] / val_total_num,
)
return val_avg_losses
def main() -> None:
"""Main function"""
default_data_dir = pathlib.Path(__file__).resolve().parent.parent / "data"
# Arguments
parser = argparse.ArgumentParser(
description="Train the SAiD model using BlendVOCA dataset"
)
parser.add_argument(
"--audio_dir",
type=str,
default="../BlendVOCA/audio",
help="Directory of the audio data",
)
parser.add_argument(
"--coeffs_dir",
type=str,
default="../BlendVOCA/blendshape_coeffs",
help="Directory of the blendshape coefficients data",
)
parser.add_argument(
"--coeffs_std_path",
type=str,
default="", # default_data_dir / "coeffs_std.csv",
help="Path of the coeffs std data",
)
parser.add_argument(
"--blendshape_residuals_path",
type=str,
default="", # default_data_dir / "blendshape_residuals.pickle",
help="Path of the blendshape residuals",
)
parser.add_argument(
"--landmarks_path",
type=str,
default="", # default_data_dir / "FLAME_head_landmarks.txt",
help="Path of the landmarks data",
)
parser.add_argument(
"--output_dir",
type=str,
default="../output",
help="Directory of the outputs",
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
help="Prediction type of the scheduler function, 'epsilon', 'sample', or 'v_prediction'",
)
parser.add_argument(
"--window_size_min",
type=int,
default=120,
help="Minimum window size of the blendshape coefficients sequence at training",
)
parser.add_argument(
"--batch_size", type=int, default=8, help="Batch size at training"
)
parser.add_argument(
"--epochs", type=int, default=100000, help="The number of epochs"
)
parser.add_argument(
"--num_warmup_epochs",
type=int,
default=5000,
help="The number of warmup epochs",
)
parser.add_argument(
"--num_workers", type=int, default=0, help="The number of workers"
)
parser.add_argument(
"--learning_rate", type=float, default=1e-5, help="Learning rate"
)
parser.add_argument(
"--uncond_prob",
type=float,
default=0.1,
help="Unconditional probability of waveform (for classifier-free guidance)",
)
parser.add_argument(
"--unet_feature_dim",
type=int,
default=-1,
help="Dimension of the latent feature of the UNet",
)
parser.add_argument(
"--weight_vel",
type=float,
default=1.0,
help="Weight for the velocity loss",
)
parser.add_argument(
"--weight_vertex",
type=float,
default=0.02,
help="Weight for the vertex loss",
)
parser.add_argument(
"--ema",
type=bool,
default=True,
help="Use Exponential Moving Average of models weights",
)
parser.add_argument(
"--ema_decay",
type=float,
default=0.9999,
help="Ema decay rate",
)
parser.add_argument(
"--val_period", type=int, default=200, help="Period of validating model"
)
parser.add_argument(
"--val_repeat", type=int, default=50, help="Number of repetition of val dataset"
)
parser.add_argument(
"--save_period", type=int, default=200, help="Period of saving model"
)
args = parser.parse_args()
audio_dir = args.audio_dir
coeffs_dir = args.coeffs_dir
coeffs_std_path = args.coeffs_std_path
blendshape_deltas_path = args.blendshape_residuals_path
if blendshape_deltas_path == "":
blendshape_deltas_path = None
landmarks_path = args.landmarks_path
if landmarks_path == "":
landmarks_path = None
coeffs_std = (
None if coeffs_std_path == "" else load_blendshape_coeffs(coeffs_std_path)
)
output_dir = args.output_dir
prediction_type = args.prediction_type
window_size_min = args.window_size_min
batch_size = args.batch_size
epochs = args.epochs
num_warmup_epochs = args.num_warmup_epochs
num_workers = args.num_workers
learning_rate = args.learning_rate
uncond_prob = args.uncond_prob
unet_feature_dim = args.unet_feature_dim
weight_vel = args.weight_vel
weight_vertex = args.weight_vertex
ema = args.ema
ema_decay = args.ema_decay
val_period = args.val_period
val_repeat = args.val_repeat
save_period = args.save_period
# Initialize accelerator
accelerator = Accelerator(log_with="tensorboard", project_dir=output_dir)
if accelerator.is_main_process:
accelerator.init_trackers("SAiD")
said_model = SAID_UNet1D(
feature_dim=unet_feature_dim,
prediction_type=prediction_type,
) | said_model.audio_encoder = ModifiedWav2Vec2Model.from_pretrained( | 2 | 2023-11-03 06:38:51+00:00 | 8k |
Harvard-Ophthalmology-AI-Lab/FairSeg | SAMed/segment_anything/modeling/sam.py | [
{
"identifier": "ImageEncoderViT",
"path": "SAMed/segment_anything/modeling/image_encoder.py",
"snippet": "class ImageEncoderViT(nn.Module):\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n embed_dim: int = 768,\n depth: int = 12,\n num_heads: int = 12,\n mlp_ratio: float = 4.0,\n out_chans: int = 256,\n qkv_bias: bool = True,\n norm_layer: Type[nn.Module] = nn.LayerNorm,\n act_layer: Type[nn.Module] = nn.GELU,\n use_abs_pos: bool = True,\n use_rel_pos: bool = False,\n rel_pos_zero_init: bool = True,\n window_size: int = 0,\n global_attn_indexes: Tuple[int, ...] = (),\n ) -> None:\n \"\"\"\n Args:\n img_size (int): Input image size.\n patch_size (int): Patch size.\n in_chans (int): Number of input image channels.\n embed_dim (int): Patch embedding dimension.\n depth (int): Depth of ViT.\n num_heads (int): Number of attention heads in each ViT block.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\n norm_layer (nn.Module): Normalization layer.\n act_layer (nn.Module): Activation layer.\n use_abs_pos (bool): If True, use absolute positional embeddings.\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n window_size (int): Window size for window attention blocks.\n global_attn_indexes (list): Indexes for blocks using global attention.\n \"\"\"\n super().__init__()\n self.img_size = img_size\n\n self.patch_embed = PatchEmbed(\n kernel_size=(patch_size, patch_size),\n stride=(patch_size, patch_size),\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n\n self.pos_embed: Optional[nn.Parameter] = None\n if use_abs_pos:\n # Initialize absolute positional embedding with pretrain image size.\n self.pos_embed = nn.Parameter(\n torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)\n )\n\n self.blocks = nn.ModuleList()\n for i in range(depth):\n block = Block(\n dim=embed_dim,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n norm_layer=norm_layer,\n act_layer=act_layer,\n use_rel_pos=use_rel_pos,\n rel_pos_zero_init=rel_pos_zero_init,\n window_size=window_size if i not in global_attn_indexes else 0,\n input_size=(img_size // patch_size, img_size // patch_size),\n )\n self.blocks.append(block)\n\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dim,\n out_chans,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n nn.Conv2d(\n out_chans,\n out_chans,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.patch_embed(x) # pre embed: [1, 3, 1024, 1024], post embed: [1, 64, 64, 768]\n if self.pos_embed is not None:\n x = x + self.pos_embed\n\n for blk in self.blocks:\n x = blk(x)\n\n x = self.neck(x.permute(0, 3, 1, 2)) # [b, c, h, w], [1, 256, 64, 64]\n\n return x"
},
{
"identifier": "MaskDecoder",
"path": "SAMed/segment_anything/modeling/mask_decoder.py",
"snippet": "class MaskDecoder(nn.Module):\n def __init__(\n self,\n *,\n transformer_dim: int,\n transformer: nn.Module,\n num_multimask_outputs: int = 3,\n activation: Type[nn.Module] = nn.GELU,\n iou_head_depth: int = 3,\n iou_head_hidden_dim: int = 256,\n ) -> None:\n \"\"\"\n Predicts masks given an image and prompt embeddings, using a\n tranformer architecture.\n\n Arguments:\n transformer_dim (int): the channel dimension of the transformer\n transformer (nn.Module): the transformer used to predict masks\n num_multimask_outputs (int): the number of masks to predict\n when disambiguating masks\n activation (nn.Module): the type of activation to use when\n upscaling masks\n iou_head_depth (int): the depth of the MLP used to predict\n mask quality\n iou_head_hidden_dim (int): the hidden dimension of the MLP\n used to predict mask quality\n \"\"\"\n super().__init__()\n self.transformer_dim = transformer_dim\n self.transformer = transformer\n\n self.num_multimask_outputs = num_multimask_outputs\n\n self.iou_token = nn.Embedding(1, transformer_dim)\n self.num_mask_tokens = num_multimask_outputs + 1\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n self.output_upscaling = nn.Sequential(\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\n LayerNorm2d(transformer_dim // 4),\n activation(),\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\n activation(),\n )\n self.output_hypernetworks_mlps = nn.ModuleList(\n [\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\n for i in range(self.num_mask_tokens)\n ]\n )\n\n self.iou_prediction_head = MLP(\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\n )\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n multimask_output: bool,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks given image and prompt embeddings.\n\n Arguments:\n image_embeddings (torch.Tensor): the embeddings from the image encoder\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n multimask_output (bool): Whether to return multiple masks or a single\n mask.\n\n Returns:\n torch.Tensor: batched predicted masks\n torch.Tensor: batched predictions of mask quality\n \"\"\"\n masks, iou_pred = self.predict_masks(\n image_embeddings=image_embeddings,\n image_pe=image_pe,\n sparse_prompt_embeddings=sparse_prompt_embeddings,\n dense_prompt_embeddings=dense_prompt_embeddings,\n )\n\n # Select the correct mask or masks for output\n # if multimask_output:\n # mask_slice = slice(1, None)\n # else:\n # mask_slice = slice(0, 1)\n # masks = masks[:, mask_slice, :, :]\n # iou_pred = iou_pred[:, mask_slice]\n\n # Prepare output\n return masks, iou_pred\n\n def predict_masks(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\n # Concatenate output tokens\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n # Expand per-image data in batch direction to be per-mask\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n src = src + dense_prompt_embeddings\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n b, c, h, w = src.shape\n\n # Run the transformer\n hs, src = self.transformer(src, pos_src, tokens)\n iou_token_out = hs[:, 0, :]\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\n\n # Upscale mask embeddings and predict masks using the mask tokens\n src = src.transpose(1, 2).view(b, c, h, w)\n upscaled_embedding = self.output_upscaling(src)\n hyper_in_list: List[torch.Tensor] = []\n for i in range(self.num_mask_tokens):\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\n hyper_in = torch.stack(hyper_in_list, dim=1) # [b, c, token_num]\n\n b, c, h, w = upscaled_embedding.shape # [h, token_num, h, w]\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) # [1, 4, 256, 256], 256 = 4 * 64, the size of image embeddings\n\n # Generate mask quality predictions\n iou_pred = self.iou_prediction_head(iou_token_out)\n\n return masks, iou_pred"
},
{
"identifier": "PromptEncoder",
"path": "SAMed/segment_anything/modeling/prompt_encoder.py",
"snippet": "class PromptEncoder(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n image_embedding_size: Tuple[int, int],\n input_image_size: Tuple[int, int],\n mask_in_chans: int,\n activation: Type[nn.Module] = nn.GELU,\n ) -> None:\n \"\"\"\n Encodes prompts for input to SAM's mask decoder.\n\n Arguments:\n embed_dim (int): The prompts' embedding dimension\n image_embedding_size (tuple(int, int)): The spatial size of the\n image embedding, as (H, W).\n input_image_size (int): The padded size of the image as input\n to the image encoder, as (H, W).\n mask_in_chans (int): The number of hidden channels used for\n encoding input masks.\n activation (nn.Module): The activation to use when encoding\n input masks.\n \"\"\"\n super().__init__()\n self.embed_dim = embed_dim\n self.input_image_size = input_image_size\n self.image_embedding_size = image_embedding_size\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\n point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\n self.point_embeddings = nn.ModuleList(point_embeddings)\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\n self.mask_downscaling = nn.Sequential(\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans // 4),\n activation(),\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans),\n activation(),\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\n ) # downsample to 1/4\n self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n def get_dense_pe(self) -> torch.Tensor:\n \"\"\"\n Returns the positional encoding used to encode point prompts,\n applied to a dense set of points the shape of the image encoding.\n\n Returns:\n torch.Tensor: Positional encoding with shape\n 1x(embed_dim)x(embedding_h)x(embedding_w)\n \"\"\"\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\n\n def _embed_points(\n self,\n points: torch.Tensor,\n labels: torch.Tensor,\n pad: bool,\n ) -> torch.Tensor:\n \"\"\"Embeds point prompts.\"\"\"\n points = points + 0.5 # Shift to center of pixel\n if pad:\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n points = torch.cat([points, padding_point], dim=1)\n labels = torch.cat([labels, padding_label], dim=1)\n point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\n point_embedding[labels == -1] = 0.0\n point_embedding[labels == -1] += self.not_a_point_embed.weight\n point_embedding[labels == 0] += self.point_embeddings[0].weight\n point_embedding[labels == 1] += self.point_embeddings[1].weight\n return point_embedding\n\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds box prompts.\"\"\"\n boxes = boxes + 0.5 # Shift to center of pixel\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding\n\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds mask inputs.\"\"\"\n mask_embedding = self.mask_downscaling(masks)\n return mask_embedding\n\n def _get_batch_size(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n 
boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> int:\n \"\"\"\n Gets the batch size of the output given the batch size of the input prompts.\n \"\"\"\n if points is not None:\n return points[0].shape[0]\n elif boxes is not None:\n return boxes.shape[0]\n elif masks is not None:\n return masks.shape[0]\n else:\n return 1\n\n def _get_device(self) -> torch.device:\n return self.point_embeddings[0].weight.device\n\n def forward(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Embeds different types of prompts, returning both sparse and dense\n embeddings.\n\n Arguments:\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\n and labels to embed.\n boxes (torch.Tensor or none): boxes to embed\n masks (torch.Tensor or none): masks to embed\n\n Returns:\n torch.Tensor: sparse embeddings for the points and boxes, with shape\n BxNx(embed_dim), where N is determined by the number of input points\n and boxes.\n torch.Tensor: dense embeddings for the masks, in the shape\n Bx(embed_dim)x(embed_H)x(embed_W)\n \"\"\"\n bs = self._get_batch_size(points, boxes, masks)\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\n )\n\n return sparse_embeddings, dense_embeddings"
}
] | import torch
from torch import nn
from torch.nn import functional as F
from icecream import ic
from typing import Any, Dict, List, Tuple
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder | 4,202 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder, | mask_decoder: MaskDecoder, | 1 | 2023-11-03 17:05:40+00:00 | 8k |
microsoft/PLEX | scripts/exps_on_MW.py | [
{
"identifier": "pretrain_EX",
"path": "PLEX/pretraining_EX.py",
"snippet": "def pretrain_EX(cmdline_args):\n os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n print(\"=== Pretraining the Execuctor ===\")\n parser = argparse.ArgumentParser()\n\n # Add all relevant command-line arguments\n add_common_args(parser)\n add_common_pretraining_args(parser)\n parser.add_argument('--noncontextual_pretrain_tasks', type=str, default=None)\n\n # Parse them and validate them\n args = parser.parse_args(cmdline_args)\n args = vars(args)\n assert args['best_metric'] != 'evaluation/success_rate', 'Currently, evaluation/success_rate is not a valid metric for pretraining. Use evaluation/neg_val_error instead.'\n\n # These parameters are needed only for evaluating the model. Since at the current stage we are pretraining just the EX\n # (inverse dynamics) part of PLEX, the values of the parameters other than bc_learning_mode don't matter, since at the\n # end of this stage the model won't yet know how to handle goal contexts.\n args['bc_learning_mode'] = True\n args['context_style'] = 'blank'\n args['context_from_same_traj'] = False\n args['reward_type'] = 'native'\n args['normalize_reward'] = False\n args['discount'] = 0\n\n # If we are pretraining a PLEX model, for loss computation we should use *just* the inverse dynamics predictions\n # computed based on obs. in the training trajectories (not predictions of the obs., and not predictions of inv.d. based\n # on predicted obs. -- both of these need context to be provided, and we want inv.d. to be context-independent).\n #\n # NOTE: The arguments below aren't actual command-line arguments. We are just addeing them to args[] out of convenience.\n if args['model'] == 'PLEX':\n args['grounded_inverse_dynamics_loss_weight'] = 1\n args['predicted_inverse_dynamics_loss_weight'] = 0\n args['future_prediction_loss_weight'] = 0\n\n log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args)\n # NOTE: common_env_metadata_dict may be modified by the calls to load_data below.\n\n # Load data: context-agnostic dynamics data and validation trajectories (if any)\n noncontextual_pretrain_tasks, noncontextual_pretrain_max_trajs = parse_tasks(args['noncontextual_pretrain_tasks'], args['robot'], args['max_pretrain_trajectories'])\n print(f'*** The validation tasks are: {args[\"validation_tasks\"]} ***')\n validation_tasks, validation_max_trajs = parse_tasks(args['validation_tasks'], args['robot'], args['max_validation_trajectories'])\n\n all_pretrain_trajectories = []\n\n # First, load validation data, if any\n if validation_tasks:\n print(\"Reading validation tasks...\")\n data = load_data(log,\n data_dir,\n validation_tasks,\n # NOTE: the parameter that controls this is max_validation_trajectories, *NOT* max_pretrain_trajectories.\n max_trajectories=validation_max_trajs,\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n # This doesn't matter for evaluation of pretrained executor.\n normalize_rewards=False,\n # This doesn't matter for evaluation of pretrained executor.\n reward_type='sparse',\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n val_train_data, val_val_data = {}, {}\n for k, v in data.items():\n print(f'Splitting the data of validation task {k}...')\n train_trajectories, val_trajectories = train_val_split(v, args['validation_frac'])\n val_train_data[k] = TrajectoryDataset(train_trajectories, camera_names, True)\n val_val_data[k] = 
TrajectoryDataset(val_trajectories, camera_names, True)\n print(f'Stored {len(val_train_data[k].trajectories)} training and {len(val_val_data[k].trajectories)} validation trajectories for task {k}...')\n\n \"\"\"\n If we don't have a finetuning stage for evaluating the pretrained model, use the training trajectories\n of the validation tasks for pretraining the model. These tasks' validation trajectories will still be used\n for computing the pretrained model's validation loss.\n \"\"\"\n if args['num_steps_per_ft_eval_iter'] <= 0 and args['validation_frac'] < 1.0:\n print(f\"NOTE: since we aren't doing finetuning for evaluation at pretraining time (num_steps_per_ft_eval_iter = {args['num_steps_per_ft_eval_iter']}), we'll use some of the trajectories from validation task {k} during pretraining. These trajectries are *not* in the validation split.\")\n all_pretrain_trajectories.extend(train_trajectories)\n del data\n\n # Then, load context-agnostic dynamics data\n print(\"Reading context-agnostic dynamics data...\")\n data = load_data(log,\n data_dir,\n noncontextual_pretrain_tasks,\n video_only=False,\n max_trajectories=noncontextual_pretrain_max_trajs,\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n # This doesn't matter for evaluation of pretrained executor.\n normalize_rewards=False,\n # This doesn't matter for evaluation of pretrained executor.\n reward_type='sparse',\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n for k, v in data.items():\n log(f'{len(v)} trajectories for task {k}')\n all_pretrain_trajectories.extend(v)\n\n noncontextual_pretrain_data = TrajectoryDataset(all_pretrain_trajectories, camera_names, False)\n del data\n\n # Instantiate a model\n model, trainable_param_spec = setup_model(args,\n noncontextual_pretrain_tasks[0],\n log,\n device,\n camera_names,\n modalities_to_mask,\n data_dir,\n bc_mode=False)\n\n # Prepare the model for training\n trainable_params = set_trainable_params(model, trainable_param_spec, log)\n\n # Instantiate a batch sampler over the training data we loaded above\n batch_sampler = setup_batch_sampler(noncontextual_pretrain_data, None, args, device)\n\n # NOTE: We should reconsider how finetuning-based evaluator works should it be allowed to modify only exactly\n # same set of parameters that training modifies (trainable_params) or a different one (e.g., just the head)?\n #\n # Either way, in the most common evaluation case, i.e., when this evaluator just runs the model against\n # the validation tasks' data without actually doing finetuning (args['num_steps_per_ft_eval_iter'] = 0),\n # this method works correctly now.\n eval_fns = [get_finetuning_based_evaluator(val_train_data, val_val_data, trainable_params, args, device)]\n\n # Instantiate a trainer\n trainer = setup_trainer(batch_sampler,\n args['pretrain_learning_rate'],\n eval_fns,\n model,\n trainable_params,\n args)\n\n\n if log_to_wandb:\n group_name = f'{args[\"robot\"]}_pretrain'\n setup_wandb_logging(group_name, args)\n\n # Run training\n model_name_prefix = ('pretr_' + args['model'] + '__' if args['model'] != 'PLEX' else 'pretr_EX__')\n metric_values = run_training(trainer, model, args['pretrain_steps_per_iter'], model_name_prefix, args, log, log_to_wandb, timer)\n return metric_values"
},
{
"identifier": "pretrain_PL",
"path": "PLEX/pretraining_PL.py",
"snippet": "def pretrain_PL(cmdline_args):\n os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n print(\"=== Pretraining the Planner ===\")\n parser = argparse.ArgumentParser()\n\n # Add all relevant command-line arguments\n add_common_args(parser)\n add_common_pretraining_args(parser)\n add_conditioning_args(parser)\n parser.add_argument('--video_tasks', type=str, default=None)\n\n # Parse them and validate them\n args = parser.parse_args(cmdline_args)\n args = vars(args)\n if not args['bc_learning_mode']:\n assert 'reward' not in args['modalities_to_mask'], \"If the model is expected to condition on returns, then they should not be masked out.\"\n assert args['best_metric'] != 'evaluation/success_rate', 'Currently, evaluation/success_rate is not a valid metric for pretraining. Use evaluation/neg_val_error instead.'\n\n # If we are pretraining a PLEX model, for loss computation we should use *just* the obs. embedding predictions,\n # not predictions of inverse dynamics.\n #\n # NOTE: The arguments below aren't actual command-line arguments. We are just addeing them to args[] out of convenience.\n if args['model'] == 'PLEX':\n args['grounded_inverse_dynamics_loss_weight'] = 0\n args['predicted_inverse_dynamics_loss_weight'] = 0\n args['future_prediction_loss_weight'] = 1\n\n log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args)\n # NOTE: common_env_metadata_dict may be modified by the calls to load_data below.\n\n # Load data: videos and validation trajectories (if any)\n video_tasks, video_max_trajs = parse_tasks(args['video_tasks'])\n print(f'*** The validation tasks are: {args[\"validation_tasks\"]} ***')\n validation_tasks, validation_max_trajs = parse_tasks(args['validation_tasks'], args['robot'], args['max_validation_trajectories'])\n\n all_pretrain_trajectories = []\n # First, load validation data, if any\n if validation_tasks:\n print(\"Reading validation tasks...\")\n data = load_data(log,\n data_dir,\n validation_tasks,\n # NOTE: the parameter that controls this is max_validation_trajectories, *NOT* max_pretrain_trajectories.\n max_trajectories=validation_max_trajs,\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n normalize_rewards=args['normalize_reward'],\n reward_type=args['reward_type'],\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n val_train_data, val_val_data = {}, {}\n for k, v in data.items():\n print(f'Splitting the data of validation task {k}...')\n train_trajectories, val_trajectories = train_val_split(v, args['validation_frac'])\n val_train_data[k] = TrajectoryDataset(train_trajectories, camera_names, True)\n val_val_data[k] = TrajectoryDataset(val_trajectories, camera_names, True)\n print(f'Stored {len(val_train_data[k].trajectories)} training and {len(val_val_data[k].trajectories)} validation trajectories for task {k}...')\n\n \"\"\"\n If we don't have a finetuning stage for evaluating the pretrained model, use the training trajectories\n of the validation tasks for pretraining the model. 
These tasks' validation trajectories will still be used\n for computing the pretrained model's validation loss.\n \"\"\"\n if args['num_steps_per_ft_eval_iter'] <= 0 and args['validation_frac'] < 1.0:\n print(f\"NOTE: since we aren't doing finetuning for evaluation at pretraining time (num_steps_per_ft_eval_iter = {args['num_steps_per_ft_eval_iter']}), we'll use some of the trajectories from validation task {k} during pretraining. These trajectries are *not* in the validation split.\")\n all_pretrain_trajectories.extend(train_trajectories)\n del data\n\n # Then, load video-only data\n print(\"Reading video-only data...\")\n data = load_data(log,\n data_dir,\n video_tasks,\n video_only=True,\n max_trajectories=video_max_trajs,\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n normalize_rewards=args['normalize_reward'],\n reward_type=args['reward_type'],\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n for k, v in data.items():\n log(f'{len(v)} videos for task {k}')\n all_pretrain_trajectories.extend(v)\n\n video_data = TrajectoryDataset(all_pretrain_trajectories, camera_names, True)\n del data\n\n # Instantiate a model\n model, trainable_param_spec = setup_model(args,\n video_tasks[0],\n log,\n device,\n camera_names,\n modalities_to_mask,\n data_dir,\n args['bc_learning_mode'])\n\n # Prepare the model for training\n trainable_params = set_trainable_params(model, trainable_param_spec, log)\n\n # Instantiate a batch sampler over the training data we loaded above\n batch_sampler = setup_batch_sampler(video_data, args['context_style'], args, device)\n\n # NOTE: We should reconsider how finetuning-based evaluator works should it be allowed to modify only exactly\n # same set of parameters that training modifies (trainable_params) or a different one (e.g., just the head)?\n #\n # Either way, in the most common evaluation case, i.e., when this evaluator just runs the model against\n # the validation tasks' data without actually doing finetuning (args['num_steps_per_ft_eval_iter'] = 0),\n # this method works correctly now.\n eval_fns = [get_finetuning_based_evaluator(val_train_data, val_val_data, trainable_params, args, device)]\n\n # Instantiate a trainer\n trainer = setup_trainer(batch_sampler,\n args['pretrain_learning_rate'],\n eval_fns,\n model,\n trainable_params,\n args)\n\n if log_to_wandb:\n group_name = f'{args[\"robot\"]}_pretrain'\n setup_wandb_logging(group_name, args)\n\n # Run training\n model_name_prefix = ('pretr_' + args['model'] + '__' if args['model'] != 'PLEX' else 'pretr_PLEX__')\n metric_values = run_training(trainer, model, args['pretrain_steps_per_iter'], model_name_prefix, args, log, log_to_wandb, timer)\n return metric_values"
},
{
"identifier": "finetune",
"path": "PLEX/finetuning.py",
"snippet": "def finetune(cmdline_args):\n os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n print(\"=== Finetuning ===\")\n parser = argparse.ArgumentParser()\n # Add all relevant command-line arguments\n add_common_args(parser)\n add_conditioning_args(parser)\n parser.add_argument('--finetune_learning_rate', type=float, default=1e-5)\n parser.add_argument('--finetune_steps_per_iter', type=int, default=100)\n parser.add_argument('--target_task', type=str, default=None)\n parser.add_argument('--max_target_trajectories', type=int, default=None)\n\n # Parse them and validate them\n args = parser.parse_args(cmdline_args)\n args = vars(args)\n if not args['bc_learning_mode']:\n assert 'reward' not in args['modalities_to_mask'], \"If the model is expected to condition on returns, then they should not be masked out.\"\n\n # NOTE: The arguments below aren't actual command-line arguments. We are just addeing them to args[] out of convenience.\n # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the\n # finetuning trajectories contain actions**, we adapt PLEX's based on the predicted observation latents\n # from it planner PL rather than based on the actual (\"grounded\") observation latents contained\n # in finetuning trajectories.\n if args['model'] == 'PLEX':\n args['grounded_inverse_dynamics_loss_weight'] = 0\n args['predicted_inverse_dynamics_loss_weight'] = 1\n args['future_prediction_loss_weight'] = 1\n\n log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args)\n # NOTE: common_env_metadata_dict may be modified by the calls to load_data below.\n\n # Load data: target-task trajectories\n target_tasks, target_max_trajs = parse_tasks(args['target_task'], args['robot'], args['max_target_trajectories'])\n target_task = target_tasks[0]\n\n data = load_data(log,\n data_dir,\n target_tasks,\n max_trajectories=target_max_trajs,\n discount=args['discount'],\n camera_names=camera_names,\n image_size=args['image_size'],\n target_frame_rate=args['target_frame_rate'],\n normalize_rewards=args['normalize_reward'],\n reward_type=args['reward_type'],\n common_env_metadata_dict=common_env_metadata_dict,\n data_shuffling_rng=data_shuffling_rng)\n\n assert len(data.keys()) == 1, f\"There should be only one target task. 
Discovered {len(data.keys())}: {data.keys()}\"\n #assert args['validation_tasks'] is None, f\"Validation tasks other than the target tasks aren't used during finetuning and were likely specified erroneously: {args['validation_tasks']}.\"\n\n # Train/test split\n # NOTE: we don't actually need create the split if args['best_metric'] == 'evaluation/success_rate'\n if args['best_metric'] == 'evaluation/success_rate':\n print(\"WARNING: since the evaluation metric is success rate, the training-validation split of the target task data will be ignored, and all target-task trajectories will be used for training.\")\n train_trajectories, val_trajectories = train_val_split(data[target_task.name], args['validation_frac'])\n target_all_data = TrajectoryDataset(data[target_task.name], camera_names, contextual=True)\n print(f\"Total target trajectories: {len(target_all_data)}\")\n target_train_data = TrajectoryDataset(train_trajectories, camera_names, contextual=True)\n target_val_data = TrajectoryDataset(val_trajectories, camera_names, contextual=True)\n del train_trajectories\n del val_trajectories\n log(f'{len(target_train_data.trajectories)} train and {len(target_val_data.trajectories)} validation trajectories')\n\n # Instantiate a model\n model, trainable_param_spec = setup_model(args,\n target_task,\n log,\n device,\n camera_names,\n modalities_to_mask,\n data_dir,\n args['bc_learning_mode'])\n\n # If the number of training iterations is 0, we are being asked to just evaluate the model\n if args['max_iters'] == 0:\n print(\"--------------- RUNNING IN EVALUATION MODE ----------------\")\n # We are in the evaluation mode\n # Note that for evaluation, we are using *all* the demonstration data for the task, not just the validation data.\n # This is because get_success_rate_evaluator will use the demo trajectories only for sampling the goals/contexts.\n # We allow using the same contexts during both training and evaluation.\n evaluator = get_success_rate_evaluator(target_task, target_all_data, common_env_metadata_dict, args, log.dir)\n dummy_iter_num = 0\n outputs = evaluator(model, dummy_iter_num)\n\n logs = dict()\n for k, v in outputs.items():\n logs[f'evaluation/{k}'] = [v]\n\n for k, v in logs.items():\n print(f'{k}: {v[0]}')\n\n print(\"--------------- FINISHED EVALUATION ----------------\")\n return logs\n\n # Otherwise, prepare the model for training\n trainable_params = set_trainable_params(model, trainable_param_spec, log)\n\n # Instantiate a batch sampler over the training data we loaded above\n if args['best_metric'] == 'evaluation/neg_val_error':\n batch_sampler = setup_batch_sampler(target_train_data, args['context_style'], args, device)\n else:\n # Recall from above that if the metric is success rate, we use all target task data for training,\n # without allocating any of this data for validation.\n batch_sampler = setup_batch_sampler(target_all_data, args['context_style'], args, device)\n\n # Setup a model evaluator\n eval_fn_dict = {'evaluation/neg_val_error': get_validation_error_evaluator(target_val_data, args, device),\n 'evaluation/success_rate': get_success_rate_evaluator(target_task, target_all_data, common_env_metadata_dict, args, log.dir)}\n eval_fns = [eval_fn_dict[args['best_metric']]]\n\n # Instantiate a trainer\n trainer = setup_trainer(batch_sampler,\n args['finetune_learning_rate'],\n eval_fns,\n model,\n trainable_params,\n args)\n\n if log_to_wandb:\n group_name = f'{args[\"robot\"]}_target-{target_task.name}'\n setup_wandb_logging(group_name, args)\n\n # Run 
training\n model_name_prefix = 'finet_' + args['model'] + target_task.name + '__'\n metric_values = run_training(trainer, model, args['finetune_steps_per_iter'], model_name_prefix, args, log, log_to_wandb, timer)\n return metric_values"
}
] | from PLEX.pretraining_EX import pretrain_EX
from PLEX.pretraining_PL import pretrain_PL
from PLEX.finetuning import finetune
import argparse
import random | 7,155 |
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--training_stage", type=str, default='ft', help = "The training stage. Can be 'ex' (pretaining the EXecutor), 'pl' (pretraining the PLanner), or 'ft' (finetuning a pretrained PLEX)")
parser.add_argument("-d", "--data_dir", type=str, default='store/data', help = "Directory path where the training data is.")
parser.add_argument("-l", "--log_dir", type=str, default='store/logs', help = "Directory path where to output logs and model checkpoints.")
parser.add_argument("-m", "--model_file", type=str, default=None, help = "Model file path.")
parser.add_argument("-t", "--target_task", type=str, default=None, help = "Directory path where the target task's data is. NOTE: applicable only if the training stage is 'ft' (finetuning).")
parser.add_argument("-w", "--num_workers", type=int, default=0, help = "Number of worker for running the evaluation episodes. NOTE: applicable only if the training stage is 'ft' (finetuning).")
args = parser.parse_args()
common_flags = ['--relative_position_encodings', '--bc_learning_mode']
common_args = {
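        # Every value below is kept as a string because each entry is forwarded verbatim as a command-line token to the PLEX argument parsers.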
'seed': str(random.randint(0, 1000000)),
'data_dir': args.data_dir,
'log_dir': args.log_dir,
'robot': 'Sawyer',
'camera_names': 'corner',
'modalities_to_mask': 'proprio,action',
'record_camera': 'corner',
'image_size': '84',
'reward_type': 'sparse',
'image_encoder_arch': 'resnet18',
'impute_style': 'trainable',
'embed_dim': '256',
'future_step': '1',
'activation_function': 'relu',
'device': 'cuda',
'dropout': '0.2',
'weight_decay': '1e-05',
'warmup_steps': '200',
'batch_size': '256',
'action_output_type': 'deterministic',
'model': 'PLEX',
'obs_pred.n_layer': '3',
'obs_pred.n_head': '4',
'obs_pred.K': '30',
'inv_d_pred.n_layer': '3',
'inv_d_pred.n_head': '4',
'inv_d_pred.K': '30'
}
common_pretraining_flags = ['--no_video']
common_pretraining_args = {
'pretrain_learning_rate': '0.0005',
'pretrain_steps_per_iter': '250',
'num_steps_per_ft_eval_iter': '0',
'best_metric': 'evaluation/neg_val_error',
'validation_frac': '1.0',
'validation_samples': '30',
# Validation tasks can be any MW tasks -- we don't use validation error to stop training.
# We use the target tasks as validation tasks.
'validation_tasks': 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0/',
}
cmdline_args = common_flags
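    # Flatten the shared settings into an argv-style '--key value' list; the PLEX entry points (pretrain_EX, pretrain_PL, finetune) parse it with argparse.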
for k in common_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_args[k])
if args.training_stage == 'ex':
cmdline_args.extend(common_pretraining_flags)
for k in common_pretraining_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_pretraining_args[k])
cmdline_args.extend([
'--max_iters', '10',
# To pretrain the executor, use 75 play trajectories per task.
'--max_pretrain_trajectories', '75',
# During executor pretraining, we adapt both the executor's and the encoder's weights but keep the planner frozen.
'--image_encoder_tune_style', 'all',
'--obs_pred.transformer_tune_style', 'none',
'--inv_d_pred.transformer_tune_style', 'all',
# Use the dynamics data from Meta-World ML50's 5 downstream environments.
'--noncontextual_pretrain_tasks', 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0.5/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0.5/',
])
pretrain_EX(cmdline_args)
elif args.training_stage == 'pl':
cmdline_args.extend(common_pretraining_flags)
for k in common_pretraining_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_pretraining_args[k])
cmdline_args.extend([
'--max_iters', '10',
# To pretrain the planner, use all (100) available video demonstrations per task.
            '--max_pretrain_trajectories', '100',
'--context_style', 'first-success',
'--context_from_diff_traj',
# During planner pretraining, we want to keep the encoder and the executor's weights frozen, adapting only the weights of the planner itself.
'--image_encoder_tune_style', 'none',
'--obs_pred.transformer_tune_style', 'all',
'--inv_d_pred.transformer_tune_style', 'none',
# For pretraining, use video demonstrations from Meta-World ML50's 45 pretraining tasks.
'--video_tasks', 'metaworld/pick-out-of-hole-v2/Sawyer/noise0/,metaworld/door-open-v2/Sawyer/noise0/,metaworld/pick-place-wall-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/drawer-open-v2/Sawyer/noise0/,metaworld/window-open-v2/Sawyer/noise0/,metaworld/button-press-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/shelf-place-v2/Sawyer/noise0/,metaworld/basketball-v2/Sawyer/noise0/,metaworld/button-press-topdown-v2/Sawyer/noise0/,metaworld/button-press-topdown-wall-v2/Sawyer/noise0/,metaworld/button-press-wall-v2/Sawyer/noise0/,metaworld/coffee-button-v2/Sawyer/noise0/,metaworld/coffee-push-v2/Sawyer/noise0/,metaworld/disassemble-v2/Sawyer/noise0/,metaworld/door-close-v2/Sawyer/noise0/,metaworld/drawer-close-v2/Sawyer/noise0/,metaworld/faucet-open-v2/Sawyer/noise0/,metaworld/hammer-v2/Sawyer/noise0/,metaworld/handle-press-side-v2/Sawyer/noise0/,metaworld/handle-press-v2/Sawyer/noise0/,metaworld/handle-pull-v2/Sawyer/noise0/,metaworld/lever-pull-v2/Sawyer/noise0/,metaworld/peg-insert-side-v2/Sawyer/noise0/,metaworld/reach-v2/Sawyer/noise0/,metaworld/push-back-v2/Sawyer/noise0/,metaworld/push-v2/Sawyer/noise0/,metaworld/pick-place-v2/Sawyer/noise0/,metaworld/plate-slide-v2/Sawyer/noise0/,metaworld/plate-slide-side-v2/Sawyer/noise0/,metaworld/plate-slide-back-v2/Sawyer/noise0/,metaworld/peg-unplug-side-v2/Sawyer/noise0/,metaworld/soccer-v2/Sawyer/noise0/,metaworld/stick-pull-v2/Sawyer/noise0/,metaworld/push-wall-v2/Sawyer/noise0/,metaworld/reach-wall-v2/Sawyer/noise0/,metaworld/sweep-v2/Sawyer/noise0/,metaworld/window-close-v2/Sawyer/noise0/',
'--load_path', args.model_file
])
|
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--training_stage", type=str, default='ft', help = "The training stage. Can be 'ex' (pretaining the EXecutor), 'pl' (pretraining the PLanner), or 'ft' (finetuning a pretrained PLEX)")
parser.add_argument("-d", "--data_dir", type=str, default='store/data', help = "Directory path where the training data is.")
parser.add_argument("-l", "--log_dir", type=str, default='store/logs', help = "Directory path where to output logs and model checkpoints.")
parser.add_argument("-m", "--model_file", type=str, default=None, help = "Model file path.")
parser.add_argument("-t", "--target_task", type=str, default=None, help = "Directory path where the target task's data is. NOTE: applicable only if the training stage is 'ft' (finetuning).")
parser.add_argument("-w", "--num_workers", type=int, default=0, help = "Number of worker for running the evaluation episodes. NOTE: applicable only if the training stage is 'ft' (finetuning).")
args = parser.parse_args()
common_flags = ['--relative_position_encodings', '--bc_learning_mode']
common_args = {
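        # Every value below is kept as a string because each entry is forwarded verbatim as a command-line token to the PLEX argument parsers.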
'seed': str(random.randint(0, 1000000)),
'data_dir': args.data_dir,
'log_dir': args.log_dir,
'robot': 'Sawyer',
'camera_names': 'corner',
'modalities_to_mask': 'proprio,action',
'record_camera': 'corner',
'image_size': '84',
'reward_type': 'sparse',
'image_encoder_arch': 'resnet18',
'impute_style': 'trainable',
'embed_dim': '256',
'future_step': '1',
'activation_function': 'relu',
'device': 'cuda',
'dropout': '0.2',
'weight_decay': '1e-05',
'warmup_steps': '200',
'batch_size': '256',
'action_output_type': 'deterministic',
'model': 'PLEX',
'obs_pred.n_layer': '3',
'obs_pred.n_head': '4',
'obs_pred.K': '30',
'inv_d_pred.n_layer': '3',
'inv_d_pred.n_head': '4',
'inv_d_pred.K': '30'
}
common_pretraining_flags = ['--no_video']
common_pretraining_args = {
'pretrain_learning_rate': '0.0005',
'pretrain_steps_per_iter': '250',
'num_steps_per_ft_eval_iter': '0',
'best_metric': 'evaluation/neg_val_error',
'validation_frac': '1.0',
'validation_samples': '30',
# Validation tasks can be any MW tasks -- we don't use validation error to stop training.
# We use the target tasks as validation tasks.
'validation_tasks': 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0/',
}
cmdline_args = common_flags
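    # Flatten the shared settings into an argv-style '--key value' list; the PLEX entry points (pretrain_EX, pretrain_PL, finetune) parse it with argparse.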
for k in common_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_args[k])
if args.training_stage == 'ex':
cmdline_args.extend(common_pretraining_flags)
for k in common_pretraining_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_pretraining_args[k])
cmdline_args.extend([
'--max_iters', '10',
# To pretrain the executor, use 75 play trajectories per task.
'--max_pretrain_trajectories', '75',
# During executor pretraining, we adapt both the executor's and the encoder's weights but keep the planner frozen.
'--image_encoder_tune_style', 'all',
'--obs_pred.transformer_tune_style', 'none',
'--inv_d_pred.transformer_tune_style', 'all',
# Use the dynamics data from Meta-World ML50's 5 downstream environments.
'--noncontextual_pretrain_tasks', 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0.5/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0.5/',
])
pretrain_EX(cmdline_args)
elif args.training_stage == 'pl':
cmdline_args.extend(common_pretraining_flags)
for k in common_pretraining_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_pretraining_args[k])
cmdline_args.extend([
'--max_iters', '10',
# To pretrain the planner, use all (100) available video demonstrations per task.
            '--max_pretrain_trajectories', '100',
'--context_style', 'first-success',
'--context_from_diff_traj',
# During planner pretraining, we want to keep the encoder and the executor's weights frozen, adapting only the weights of the planner itself.
'--image_encoder_tune_style', 'none',
'--obs_pred.transformer_tune_style', 'all',
'--inv_d_pred.transformer_tune_style', 'none',
# For pretraining, use video demonstrations from Meta-World ML50's 45 pretraining tasks.
'--video_tasks', 'metaworld/pick-out-of-hole-v2/Sawyer/noise0/,metaworld/door-open-v2/Sawyer/noise0/,metaworld/pick-place-wall-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/drawer-open-v2/Sawyer/noise0/,metaworld/window-open-v2/Sawyer/noise0/,metaworld/button-press-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/shelf-place-v2/Sawyer/noise0/,metaworld/basketball-v2/Sawyer/noise0/,metaworld/button-press-topdown-v2/Sawyer/noise0/,metaworld/button-press-topdown-wall-v2/Sawyer/noise0/,metaworld/button-press-wall-v2/Sawyer/noise0/,metaworld/coffee-button-v2/Sawyer/noise0/,metaworld/coffee-push-v2/Sawyer/noise0/,metaworld/disassemble-v2/Sawyer/noise0/,metaworld/door-close-v2/Sawyer/noise0/,metaworld/drawer-close-v2/Sawyer/noise0/,metaworld/faucet-open-v2/Sawyer/noise0/,metaworld/hammer-v2/Sawyer/noise0/,metaworld/handle-press-side-v2/Sawyer/noise0/,metaworld/handle-press-v2/Sawyer/noise0/,metaworld/handle-pull-v2/Sawyer/noise0/,metaworld/lever-pull-v2/Sawyer/noise0/,metaworld/peg-insert-side-v2/Sawyer/noise0/,metaworld/reach-v2/Sawyer/noise0/,metaworld/push-back-v2/Sawyer/noise0/,metaworld/push-v2/Sawyer/noise0/,metaworld/pick-place-v2/Sawyer/noise0/,metaworld/plate-slide-v2/Sawyer/noise0/,metaworld/plate-slide-side-v2/Sawyer/noise0/,metaworld/plate-slide-back-v2/Sawyer/noise0/,metaworld/peg-unplug-side-v2/Sawyer/noise0/,metaworld/soccer-v2/Sawyer/noise0/,metaworld/stick-pull-v2/Sawyer/noise0/,metaworld/push-wall-v2/Sawyer/noise0/,metaworld/reach-wall-v2/Sawyer/noise0/,metaworld/sweep-v2/Sawyer/noise0/,metaworld/window-close-v2/Sawyer/noise0/',
'--load_path', args.model_file
]) | pretrain_PL(cmdline_args) | 1 | 2023-11-06 09:38:09+00:00 | 8k |
mitre/arlin | tests/conftest.py | [
{
"identifier": "XRLDataset",
"path": "arlin/dataset/xrl_dataset.py",
"snippet": "class XRLDataset:\n \"\"\"Class to store experiences from running a policy in an environment.\"\"\"\n\n def __init__(\n self,\n environment: gym.Env,\n collector: BaseDataCollector = RandomDataCollector,\n seed: int = 12345,\n ):\n \"\"\"Initialize an XRLDataset.\n\n Args:\n environment (gym.Env): Environment to run the policy in.\n collector (BaseDataCollector, optional): Collector we want to use to collect\n our data. Defaults to RandomDataCollector.\n seed (int, optional): Sed for episode creation. Defaults to 12345.\n \"\"\"\n self.env = environment\n self.collector = collector\n self.seed = seed\n\n self.num_datapoints = 0\n self.analyzed = False\n\n for field in dataclasses.fields(self.collector.datapoint_cls):\n if not hasattr(self, field.name):\n setattr(self, field.name, np.array([], dtype=np.float64))\n\n def __len__(self) -> int:\n \"\"\"Number of transitions in the dataset.\n\n Returns:\n int: Number of transitions in the dataset\n \"\"\"\n return self.num_datapoints\n\n def fill(self, num_datapoints: int = 50000, randomness: float = 0.0) -> None:\n \"\"\"Add transitions to this dataset.\n\n Args:\n num_datapoints (int, optional): Number of datapoints to add.\n Defaults to 50000.\n randomness (float, optional): How much randomness do we want when taking\n actions. Defaults to 0.0.\n \"\"\"\n logging.info(f\"Collecting {num_datapoints} datapoints.\")\n collected_datapoints = 0\n num_episodes = 0\n datapoint_list = []\n self._episode_lens = []\n trunc_count = 0\n while collected_datapoints < num_datapoints:\n datapoints, trunc = self._collect_episode(\n seed=self.seed + num_episodes + trunc_count, randomness=randomness\n )\n if trunc:\n logging.debug(\"\\tSkipping episode due to truncation.\")\n trunc_count += 1\n if trunc_count >= 5:\n err_str = (\n \"Too many truncated episodes in a row identified - \"\n + \"please try modifying the randomness value.\"\n )\n raise RuntimeError(err_str)\n continue\n trunc_count = 0\n datapoint_list += datapoints\n\n collected_datapoints += len(datapoints)\n num_episodes += 1\n\n logging.info(\n f\"\\tEpisode {num_episodes} |\"\n f\" Collected: {len(datapoints)} |\"\n f\" Total: {collected_datapoints}\"\n )\n\n logging.info(f\"Collected {collected_datapoints} datapoints total.\")\n if collected_datapoints > num_datapoints:\n num_extra = collected_datapoints - num_datapoints\n logging.debug(\n f\"{num_extra} datapoint(s) have been collected for cleaner MDP creation.\"\n )\n\n self._append_datapoints(datapoint_list)\n self._analyze_dataset()\n self.num_datapoints += collected_datapoints\n\n def _collect_episode(\n self, seed: int, randomness: float = 0.0\n ) -> Tuple[List[Type[BaseDatapoint]], bool]:\n \"\"\"Collect datapoints from a single episode.\n\n Args:\n seed (int): Seed for the episode.\n randomness (float, optional): How much randomness do we want when taking\n actions. 
Defaults to 0.0.\n\n Returns:\n Tuple[List[Type[BaseDatapoint]], bool]: Datapoints, whether this episode was\n truncated or not\n \"\"\"\n ep_datapoints = []\n obs, _ = self.env.reset(seed=seed)\n self.env.action_space.seed(seed)\n step = 0\n render = self.env.render()\n rng = np.random.default_rng(seed)\n term = False\n trunc = False\n\n while True:\n take_rand_action = rng.random() <= randomness\n\n if step == 0:\n take_rand_action = False\n\n if take_rand_action:\n action = self.env.action_space.sample()\n else:\n datapoint, action = self.collector.collect_internal_data(observation=obs)\n\n new_obs, reward, term, trunc, _ = self.env.step(action)\n\n datapoint.add_base_data(obs, action, reward, term, trunc, step, render)\n ep_datapoints.append(datapoint)\n render = self.env.render()\n\n step += 1\n obs = new_obs\n\n if term or trunc:\n break\n\n if term:\n self._episode_lens += [step] * len(ep_datapoints)\n return ep_datapoints, trunc\n\n def _append_datapoints(self, datapoints: List[Type[BaseDatapoint]]):\n \"\"\"Append the given datapoints to the dataset.\n\n Args:\n datapoints (List[Type[BaseDatapoint]]): Datapoints to add to the dataset\n \"\"\"\n start = time.time()\n field_names = [i.name for i in dataclasses.fields(self.collector.datapoint_cls)]\n\n data_dict = {i: [] for i in field_names}\n\n for i in range(len(datapoints)):\n datapoint = datapoints[i]\n\n for field_name in field_names:\n val = getattr(datapoint, field_name)\n data_dict[field_name].append(val)\n\n for field_name in field_names:\n cur_value = getattr(self, field_name)\n new_data = np.array(data_dict[field_name])\n\n if cur_value.size == 0:\n setattr(self, field_name, new_data)\n else:\n updated_value = np.concatenate([cur_value, new_data])\n setattr(self, field_name, updated_value)\n end = time.time()\n\n logging.debug(f\"Converting datapoints took {(end - start) / 60} minutes.\")\n\n def _init_analyze(self):\n \"\"\"Initialize the additional analysis metrics.\"\"\"\n logging.info(\"Initializing analytics variables.\")\n self.total_rewards = np.array([], dtype=np.float64)\n self.start_indices = np.array([], dtype=np.int8)\n self.term_indices = np.array([], dtype=np.int8)\n self.trunc_indices = np.array([], dtype=np.int8)\n self.unique_state_indices = np.array([], dtype=np.int8)\n self.state_mapping = np.array([], dtype=np.int8)\n\n self.steps = self.steps.astype(\"float32\")\n\n def _analyze_dataset(self):\n \"\"\"Add additional analysis metrics to the dataset that we can't collect.\"\"\"\n if not self.analyzed:\n self._init_analyze()\n\n logging.info(\"Extracting necessary additional data from dataset.\")\n self._set_total_rewards()\n self._set_episode_prog_indices()\n self._normalize_steps()\n self._set_distinct_state_data()\n logging.info(\"Done setting dataset analysis variables.\")\n self.analyzed = True\n\n def _set_total_rewards(self):\n \"\"\"Add information about the total reward received at each step.\"\"\"\n logging.info(\"\\tSetting self.total_rewards.\")\n\n total_rewards = []\n\n cur_total = 0\n for i in range(self.num_datapoints, len(self.rewards)):\n cur_total += self.rewards[i]\n total_rewards.append(cur_total)\n\n if self.terminateds[i] or self.truncateds[i]:\n cur_total = 0\n\n self.total_rewards = np.concatenate([self.total_rewards, np.array(total_rewards)])\n\n def _set_episode_prog_indices(self):\n \"\"\"Extract episode start and termination indices from the dataset.\"\"\"\n\n logging.info(\"\\tSetting self.start_indices.\")\n logging.info(\"\\tSetting self.term_indices.\")\n 
logging.info(\"\\tSetting self.trunc_indices.\")\n\n trunc_steps = self.steps[self.num_datapoints : len(self.steps)]\n trunc_terms = self.terminateds[self.num_datapoints : len(self.terminateds)]\n trunc_truncs = self.truncateds[self.num_datapoints : len(self.truncateds)]\n\n start_indices = np.where(trunc_steps == 0)[0] + self.num_datapoints\n term_indices = np.where(trunc_terms == 1)[0] + self.num_datapoints\n trunc_indices = np.where(trunc_truncs == 1)[0] + self.num_datapoints\n\n self.start_indices = np.concatenate([self.start_indices, start_indices])\n self.term_indices = np.concatenate([self.term_indices, term_indices])\n self.trunc_indices = np.concatenate([self.trunc_indices, trunc_indices])\n\n if len(start_indices) == 0:\n logging.warning(\"No start indices identified.\")\n\n if len(term_indices) == 0:\n logging.warning(\"No terminated indices identified.\")\n\n if len(trunc_indices) == 0:\n logging.warning(\"No truncated indices identified.\")\n\n def _normalize_steps(self):\n \"\"\"Normalize the steps between 0 and 1 depending on time in episode taken.\"\"\"\n logging.info(\"\\tNormalizing self.steps.\")\n # Only get the data from the most recent fill\n cur_fill_steps = deepcopy(self.steps[self.num_datapoints : len(self.steps)])\n normalized_steps = []\n\n for i in range(len(cur_fill_steps)):\n step = cur_fill_steps[i]\n normalized_steps.append(step / self._episode_lens[i])\n\n self.steps[self.num_datapoints : len(self.steps)] = normalized_steps\n\n def _set_distinct_state_data(self):\n \"\"\"Extract the unique state indices and corresponding state mapping to identify\n unique observations in the dataset. T-SNE has trouble with duplicate states so\n mapping unique states together is beneficial.\n \"\"\"\n\n logging.info(\"\\tSetting self.unique_state_indices.\")\n logging.info(\"\\tSetting self.state_mapping.\")\n\n outputs = np.unique(\n self.observations, return_index=True, return_inverse=True, axis=0\n )\n\n _, unique_state_indices, state_mapping = outputs\n self.unique_state_indices = unique_state_indices\n self.state_mapping = state_mapping\n\n def get_dict(self) -> Dict[str, List[np.ndarray]]:\n \"\"\"Get a dictionary representation of this dataset.\n\n Returns:\n Dict[str, List[np.ndarray]]: Dictionary representation of this dataset.\n \"\"\"\n out_dict = {}\n\n for field in dataclasses.fields(self.collector.datapoint_cls):\n out_dict[field.name] = np.array(getattr(self, field.name))\n\n if self.analyzed:\n out_dict[\"total_rewards\"] = self.total_rewards\n out_dict[\"start_indices\"] = self.start_indices\n out_dict[\"term_indices\"] = self.term_indices\n out_dict[\"trunc_indices\"] = self.trunc_indices\n out_dict[\"unique_state_indices\"] = self.unique_state_indices\n out_dict[\"state_mapping\"] = self.state_mapping\n\n return out_dict\n\n def save(self, file_path: str) -> None:\n \"\"\"\n Save dictionary of datapoints to the given file_path.\n\n Args:\n - file_path str: Filepath to save XRL dataset to.\n \"\"\"\n\n if not file_path[-4:] == \".npz\":\n file_path += \".npz\"\n\n logging.info(f\"Saving datapoints to {file_path}...\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n start = time.time()\n\n np.savez_compressed(file_path, **self.get_dict())\n end = time.time()\n\n file_size = round(os.path.getsize(file_path) >> 20, 2)\n logging.debug(f\"\\tFile size: {file_size} MB\")\n logging.debug(f\"\\tSaved dataset in {(end - start) % 60} minutes.\")\n\n def load(self, load_path: str) -> None:\n \"\"\"Load a XRLDataset from the given path.\n\n Args:\n load_path 
(str): Path to saved XRLDataset.\n\n Raises:\n ValueError: Missing a required dataset key.\n ValueError: There is no data to load.\n ValueError: Input keys do not have the same number of datapoints.\n \"\"\"\n dataset = np.load(load_path)\n\n lens = set()\n for key in [\n \"observations\",\n \"actions\",\n \"rewards\",\n \"terminateds\",\n \"truncateds\",\n \"steps\",\n \"renders\",\n ]:\n if key not in dataset:\n raise ValueError(f\"Invalid dataset - missing {key}.\")\n if len(dataset[key]) == 0:\n raise ValueError(f\"Key {key} has no associated data.\")\n lens.add(len(dataset[key]))\n\n if len(lens) > 1:\n raise ValueError(\"Input keys do not have the same number of datapoints.\")\n\n for key in dataset:\n setattr(self, key, dataset[key])\n\n self.num_datapoints = len(dataset[\"observations\"])\n\n try:\n getattr(self, \"total_rewards\")\n self.analyzed = True\n except Exception:\n self.analyzed = False"
},
{
"identifier": "RandomDataCollector",
"path": "arlin/dataset/collectors/base_collectors.py",
"snippet": "class RandomDataCollector(BaseDataCollector):\n \"\"\"Data collection when the agent is taking random actions.\"\"\"\n\n def __init__(self, datapoint_cls: Type[BaseDatapoint], environment: gym.Env):\n \"\"\"Initialize a RandomDataCollector object.\n\n Args:\n datapoint_cls (Type[BaseDatapoint]): Class of datapoint we are collecting.\n environment (gym.Env): Environment the policy is interacting with.\n \"\"\"\n super().__init__(datapoint_cls=datapoint_cls)\n self.env = environment\n\n def collect_internal_data(\n self, observation: np.ndarray\n ) -> Tuple[Type[BaseDatapoint], int]:\n action = self.env.action_space.sample()\n return self.datapoint_cls(), action"
},
{
"identifier": "SB3PPODataCollector",
"path": "arlin/dataset/collectors/sb3_collectors.py",
"snippet": "class SB3PPODataCollector(BaseDataCollector):\n \"\"\"Data collector for a model trained with PPO in stable-baselines3.\"\"\"\n\n def __init__(self, datapoint_cls: Type[BaseDatapoint], policy: BasePolicy):\n super().__init__(datapoint_cls=datapoint_cls)\n self.policy = policy\n\n def collect_internal_data(\n self, observation: np.ndarray\n ) -> Tuple[type[BaseDatapoint], int]:\n with th.no_grad():\n obs = th.Tensor(np.expand_dims(observation, 0))\n policy_dist = self.policy.get_distribution(obs)\n action = policy_dist.get_actions(deterministic=True).item()\n probs = policy_dist.distribution.probs\n value = self.policy.predict_values(obs)\n\n features = self.policy.extract_features(obs)\n if self.policy.share_features_extractor:\n latent_pi, latent_vf = self.policy.mlp_extractor(features)\n pi_features = features\n vf_features = features\n else:\n pi_features, vf_features = features\n latent_pi = self.policy.mlp_extractor.forward_actor(pi_features)\n latent_vf = self.policy.mlp_extractor.forward_critic(vf_features)\n\n datapoint = self.datapoint_cls(\n latent_actors=th.squeeze(latent_pi).numpy(),\n latent_critics=th.squeeze(latent_vf).numpy(),\n dist_probs=th.squeeze(probs).numpy(),\n critic_values=th.squeeze(value).item(),\n pi_features=th.squeeze(pi_features).numpy(),\n vf_features=th.squeeze(vf_features).numpy(),\n )\n\n return datapoint, action"
},
{
"identifier": "BaseDatapoint",
"path": "arlin/dataset/collectors/datapoints.py",
"snippet": "class BaseDatapoint:\n \"\"\"Base datapoint with traditional RL data that is common to all algorithms.\"\"\"\n\n observations: Optional[np.ndarray] = None\n actions: Optional[int] = None\n rewards: Optional[float] = None\n terminateds: Optional[bool] = None\n truncateds: Optional[bool] = None\n steps: Optional[float] = None\n renders: Optional[np.ndarray] = None\n\n def __eq__(self, other: Any):\n if not isinstance(other, BaseDatapoint):\n return False\n\n self_fields = [i.name for i in dataclasses.fields(self)]\n other_fields = [i.name for i in dataclasses.fields(other)]\n\n if not self_fields == other_fields:\n return False\n\n for field in self_fields:\n if not np.array_equal(getattr(self, field), getattr(other, field)):\n return False\n\n return True\n\n def add_base_data(\n self,\n obs: np.ndarray,\n action: int,\n reward: float,\n terminated: bool,\n truncated: bool,\n step: float,\n render: np.ndarray,\n ):\n \"\"\"Add the base RL data to this Datapoint object.\n\n Args:\n obs (np.ndarray): Current observation\n action (int): Action taken\n reward (float): Reward received\n terminated (bool): Did the episode end\n truncated (bool): Did we run out of steps\n step (float): Current step of this data\n render (np.ndarray): Render of the environment state\n \"\"\"\n self.observations = obs\n self.actions = action\n self.rewards = reward\n self.terminateds = terminated\n self.truncateds = truncated\n self.steps = step\n self.renders = render"
},
{
"identifier": "SB3PPODatapoint",
"path": "arlin/dataset/collectors/datapoints.py",
"snippet": "class SB3PPODatapoint(BaseDatapoint):\n \"\"\"Datapoint for a PPO algorithm trained in stable-baselines3.\"\"\"\n\n latent_actors: Optional[np.ndarray] = None\n latent_critics: Optional[np.ndarray] = None\n dist_probs: Optional[np.ndarray] = None\n critic_values: Optional[float] = None\n pi_features: Optional[np.ndarray] = None\n vf_features: Optional[np.ndarray] = None"
},
{
"identifier": "generate_clusters",
"path": "arlin/generation.py",
"snippet": "def generate_clusters(\n dataset: XRLDataset,\n start_cluster_keys: List[str],\n intermediate_cluster_keys: List[str],\n term_cluster_keys: List[str],\n num_clusters: int,\n seed: Optional[int] = None,\n) -> Tuple[np.ndarray, object, object, object]:\n \"\"\"Generate clusters from the given XRLDataset.\n\n NOTE: Order of the keys matters - ensure the data passed in during inference time\n matches the order of the keys passed in during cluster generation.\n\n Args:\n dataset (XRLDataset): XRLDataset to cluster on.\n start_cluster_keys (List[str]): Keys to cluster initial states on\n intermediate_cluster_keys (List[str]): Keys to cluster intermediate states on\n term_cluster_keys (List[str]): keys to cluster terminal states on\n num_clusters (int): Number of intermediate clusters to find in intermediate\n (not intitial or terminal) states\n seed (Optional[int], optional): Seed for clustering. Defaults to None.\n\n Raises:\n ValueError: No initial states found.\n ValueError: No terminal states found.\n ValueError: Not enough datapoints given (< num_clusters)\n\n Returns:\n Tuple(np.ndarray, object, object, object):\n Cluster values for each datapoint, initial cluster estimator, intermediate cluster\n estimator, terminal cluster estimator\n \"\"\"\n logging.info(f\"Generating {num_clusters} clusters.\")\n\n start = time.time()\n\n (cluster_on_start, cluster_on_mid, cluster_on_term, mid_mask) = _get_cluster_ons(\n dataset, start_cluster_keys, intermediate_cluster_keys, term_cluster_keys\n )\n\n if len(cluster_on_start) == 0:\n raise ValueError(\"No initial indices found! Cancelling clustering.\")\n else:\n start_algo = MeanShift()\n start_clusters = start_algo.fit(cluster_on_start)\n start_clusters = start_clusters.labels_\n\n if len(cluster_on_term) == 0:\n raise ValueError(\"No terminal indices found! Cancelling clustering.\")\n else:\n term_algo = MeanShift()\n term_clusters = term_algo.fit(cluster_on_term)\n term_clusters = term_clusters.labels_\n\n if num_clusters > len(cluster_on_mid):\n raise ValueError(\n f\"Not enough datapoints {len(cluster_on_mid)} to create \\\n {num_clusters} clusters.\"\n )\n\n mid_algo = KMeans(n_clusters=num_clusters, random_state=seed, n_init=\"auto\")\n mid_clusters = mid_algo.fit(cluster_on_mid)\n mid_clusters = mid_clusters.labels_\n\n n_start_clusters = len(set(start_clusters))\n\n start_clusters = np.array([x + num_clusters for x in start_clusters], dtype=int)\n term_clusters = np.array(\n [x + n_start_clusters + num_clusters for x in term_clusters], dtype=int\n )\n\n clusters = np.empty([len(dataset.terminateds)], dtype=int)\n clusters[mid_mask] = mid_clusters\n clusters[dataset.start_indices] = start_clusters\n clusters[dataset.term_indices] = term_clusters\n\n end = time.time()\n logging.info(f\"\\tSuccessfully generated clusters in {end - start} seconds.\")\n\n return clusters, start_algo, mid_algo, term_algo"
},
{
"identifier": "generate_embeddings",
"path": "arlin/generation.py",
"snippet": "def generate_embeddings(\n dataset: XRLDataset,\n activation_key: str,\n perplexity: int,\n n_train_iter: int,\n output_dim: int = 2,\n seed: int = 12345,\n) -> np.ndarray:\n \"\"\"Generate TSNE embeddings from the given XRLDataset.\n\n Args:\n dataset (XRLDataset): XRLDataset generated from an RL policy.\n activation_key (str): Data that we want to embed on.\n perplexity (int): Perplexity value for TSNE\n n_train_iter (int): Number of training iterations for TSNE\n output_dim (int, optional): Output dimensions of the embeddings. Defaults to 2.\n seed (int, optional): Seed for TSNE. Defaults to 12345.\n\n Returns:\n np.ndarray: TSNE embeddings\n \"\"\"\n logging.info(f\"Generating embeddings from dataset.{activation_key}.\")\n\n start = time.time()\n\n embedder = TSNE(\n n_jobs=4,\n n_components=output_dim,\n perplexity=perplexity,\n n_iter=n_train_iter,\n verbose=1,\n random_state=seed,\n )\n\n activations = getattr(dataset, activation_key)\n unique_activations = activations[dataset.unique_state_indices]\n\n embeddings = embedder.fit_transform(unique_activations)\n embeddings = [embeddings[index] for index in dataset.state_mapping]\n\n end = time.time()\n\n logging.info(f\"\\tSuccessfully generated embeddings in {(end - start) % 60} minutes.\")\n\n return np.array(embeddings)"
}
] | import gymnasium as gym
import pytest
from stable_baselines3 import PPO
from arlin.dataset import XRLDataset
from arlin.dataset.collectors import RandomDataCollector, SB3PPODataCollector
from arlin.dataset.collectors.datapoints import BaseDatapoint, SB3PPODatapoint
from arlin.generation import generate_clusters, generate_embeddings | 5,707 |
@pytest.fixture
def env():
# Create environment
env = gym.make("LunarLander-v2", render_mode="rgb_array")
return env
@pytest.fixture
def random_dataset(env):
    # Create the datapoint collector for random action selection (BaseDatapoint, no trained policy required)
collector = RandomDataCollector(datapoint_cls=BaseDatapoint, environment=env)
# Instantiate the XRL Dataset
dataset = XRLDataset(env, collector=collector)
dataset.fill(num_datapoints=50, randomness=0.25)
return dataset
@pytest.fixture
def random_embeddings(random_dataset):
|
@pytest.fixture
def env():
# Create environment
env = gym.make("LunarLander-v2", render_mode="rgb_array")
return env
@pytest.fixture
def random_dataset(env):
    # Create the datapoint collector for random action selection (BaseDatapoint, no trained policy required)
collector = RandomDataCollector(datapoint_cls=BaseDatapoint, environment=env)
# Instantiate the XRL Dataset
dataset = XRLDataset(env, collector=collector)
dataset.fill(num_datapoints=50, randomness=0.25)
return dataset
@pytest.fixture
def random_embeddings(random_dataset): | embeddings = generate_embeddings( | 6 | 2023-11-08 13:57:45+00:00 | 8k |
Giftify-Bot/Giftify-Bot | cogs/raffles/raffle.py | [
{
"identifier": "Giftify",
"path": "bot.py",
"snippet": "class Giftify(GiftifyHelper, commands.AutoShardedBot):\r\n user: discord.ClientUser\r\n\r\n colour: int = 0xCB3045\r\n __version_info__ = \"1.1.4\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n log_handler: LogHandler,\r\n pool: asyncpg.Pool,\r\n session: aiohttp.ClientSession,\r\n amari_client: AmariClient,\r\n ) -> None:\r\n self._log_handler = log_handler\r\n self._pool = pool\r\n self._session = session\r\n self._amari_client = amari_client\r\n\r\n intents = discord.Intents(messages=True, emojis=True, guilds=True)\r\n allowed_mentions = discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=False)\r\n member_cache_flags = discord.MemberCacheFlags.from_intents(intents=intents)\r\n\r\n sentry_sdk.init(\r\n dsn=os.environ[\"SENTRY_DSN\"],\r\n integrations=[\r\n LoggingIntegration(\r\n level=logging.INFO,\r\n event_level=logging.ERROR,\r\n )\r\n ],\r\n traces_sample_rate=1.0,\r\n )\r\n\r\n super().__init__(\r\n command_prefix=commands.when_mentioned,\r\n tree_cls=CommandTree,\r\n help_command=None,\r\n description=\"A giveaway bot for hosting giveaways.\",\r\n intents=intents,\r\n allowed_mentions=allowed_mentions,\r\n chunk_guilds_at_startup=False,\r\n max_messages=None,\r\n activity=discord.CustomActivity(name=\"\\N{LINK SYMBOL} https://giftifybot.vercel.app\"),\r\n member_cache_flags=member_cache_flags,\r\n owner_ids=OWNER_IDS,\r\n )\r\n\r\n @property\r\n def log_handler(self) -> LogHandler:\r\n return self._log_handler\r\n\r\n @property\r\n def pool(self) -> asyncpg.Pool:\r\n return self._pool\r\n\r\n @property\r\n def session(self) -> aiohttp.ClientSession:\r\n return self._session\r\n\r\n @property\r\n def amari_client(self) -> AmariClient:\r\n return self._amari_client\r\n\r\n @property\r\n def timer_cog(self) -> TimerManager:\r\n return self.get_cog(\"TimerManager\") # type: ignore\r\n\r\n def run(self) -> None:\r\n raise NotImplementedError(\"Please use `.start()` instead.\")\r\n\r\n async def on_ready(self) -> None:\r\n self.log_handler.log.info(\"%s got a ready event at %s\", self.user.name, datetime.datetime.now())\r\n\r\n async def on_resume(self) -> None:\r\n self.log_handler.log.info(\"%s got a resume event at %s\", self.user.name, datetime.datetime.now())\r\n\r\n async def on_command_error(self, ctx: commands.Context, error: commands.CommandError) -> None:\r\n if isinstance(error, commands.CommandInvokeError):\r\n origin_ = error.original\r\n assert ctx.command is not None\r\n if not isinstance(origin_, discord.HTTPException):\r\n print(f\"In {ctx.command.qualified_name}:\", file=sys.stderr)\r\n traceback.print_tb(origin_.__traceback__)\r\n print(f\"{origin_.__class__.__name__}: {origin_}\", file=sys.stderr)\r\n sentry_sdk.capture_exception(error)\r\n\r\n async def start(self) -> None:\r\n await super().start(token=os.environ[\"TOKEN\"], reconnect=True)\r\n\r\n async def setup_hook(self) -> None:\r\n self.start_time: datetime.datetime = datetime.datetime.now(datetime.timezone.utc)\r\n\r\n self.bot_app_info = await self.application_info()\r\n self.owner_ids = OWNER_IDS\r\n\r\n async def get_or_fetch_user(self, user_id: int) -> Optional[discord.User]:\r\n \"\"\"Looks up a user in cache or fetches if not found.\r\n\r\n Parameters\r\n -----------\r\n user_id: int\r\n The user ID to search for.\r\n\r\n Returns\r\n ---------\r\n Optional[User]\r\n The user or None if not found.\r\n \"\"\"\r\n\r\n user = self.get_user(user_id)\r\n if user is not None:\r\n return user\r\n\r\n try:\r\n user = await self.fetch_user(user_id)\r\n except 
discord.HTTPException:\r\n return None\r\n else:\r\n return user\r\n\r\n async def get_or_fetch_member(self, guild: discord.Guild, member_id: int) -> Optional[discord.Member]:\r\n \"\"\"Looks up a member in cache or fetches if not found.\r\n\r\n Parameters\r\n -----------\r\n guild: Guild\r\n The guild to look in.\r\n member_id: int\r\n The member ID to search for.\r\n\r\n Returns\r\n ---------\r\n Optional[Member]\r\n The member or None if not found.\r\n \"\"\"\r\n\r\n member = guild.get_member(member_id)\r\n if member is not None:\r\n return member\r\n\r\n shard: discord.ShardInfo = self.get_shard(guild.shard_id) # type: ignore # will never be None\r\n if shard.is_ws_ratelimited():\r\n try:\r\n member = await guild.fetch_member(member_id)\r\n except discord.HTTPException:\r\n return None\r\n else:\r\n return member\r\n\r\n members = await guild.query_members(limit=1, user_ids=[member_id], cache=True)\r\n if not members:\r\n return None\r\n return members[0]\r"
},
{
"identifier": "Raffle",
"path": "models/raffles.py",
"snippet": "class Raffle:\n \"\"\"\n Represents a raffle object.\n\n Attributes\n ----------\n pool: asyncpg.Pool\n The PostgreSQL connection pool instance.\n guild: discord.Guild\n The guild (server) where the raffle is hosted.\n name: str\n The name of the raffle.\n winner: Optional[discord.Member]\n The member instance of the winner, or None if the raffle hasn't ended yet.\n deputy_roles: List[discord.Role]\n A list of roles associated with the raffle.\n deputy_members: List[discord.Member]\n A list of members associated with the raffle.\n tickets: Dict[discord.Member, int]\n A mapping of members to the number of tickets they have.\n \"\"\"\n\n def __init__(\n self,\n pool: asyncpg.Pool,\n *,\n guild: discord.Guild,\n name: str,\n winner: Optional[discord.Member],\n deputy_roles: List[discord.Role],\n deputy_members: List[discord.Member],\n tickets: Dict[discord.Member, int],\n ):\n self.pool = pool\n\n self.guild = guild\n self.name = name\n self.winner = winner\n self.deputy_roles = deputy_roles\n self.deputy_members = deputy_members\n self.tickets = tickets\n\n def __str__(self):\n return self.name\n\n def __repr__(self) -> str:\n return f\"<Raffle name={self.name} guild={self.guild} winner={self.winner}>\"\n\n def __hash__(self) -> int:\n return hash((self.name, self.guild))\n\n def __eq__(self, other: Raffle) -> bool:\n return self.name == other.name and self.guild == other.guild\n\n @classmethod\n async def from_record(cls, bot: Giftify, *, record: asyncpg.Record) -> Raffle:\n name = record[\"name\"]\n guild = bot.get_guild(record[\"guild\"])\n if guild is None:\n raise RaffleError(\"The guild having the raffle was not found.\")\n\n winner_id = record[\"winner\"]\n winner: Optional[discord.Member] = (\n (await bot.get_or_fetch_member(guild, winner_id) or FakeMember(winner_id))\n if winner_id\n else None\n ) # type: ignore\n\n deputy_roles = [guild.get_role(role_id) for role_id in record[\"deputy_roles\"]]\n deputy_members = [\n await bot.get_or_fetch_member(guild, member_id)\n for member_id in record[\"deputy_members\"]\n ]\n\n tickets = {\n await bot.get_or_fetch_member(guild, int(member_id)): num_tickets\n for member_id, num_tickets in record[\"tickets\"].items()\n }\n\n return cls(\n bot.pool,\n guild=guild,\n name=name,\n winner=winner,\n deputy_roles=filter_none(deputy_roles),\n deputy_members=filter_none(deputy_members),\n tickets=filter_none(tickets),\n )\n\n async def roll(self) -> discord.Member:\n \"\"\"\n End the raffle and set the winner.\n \"\"\"\n members = list(self.tickets.keys())\n weights = list(self.tickets.values())\n\n self.winner = random.choices(members, weights, k=1)[0]\n\n await self.save()\n\n return self.winner\n\n async def add_deputy(self, obj: Union[discord.Member, discord.Role]) -> None:\n \"\"\"\n Add a deputy to the raffle.\n\n Parameters\n ----------\n obj: Union[discord.Member, discord.Role]\n The instance of deputy member or role to be added.\n \"\"\"\n if isinstance(obj, discord.Member):\n if len(self.deputy_members) >= 25:\n raise RaffleError(\"You cannot add more than 25 deputy members.\")\n self.deputy_members.append(obj)\n elif isinstance(obj, discord.Role):\n if len(self.deputy_roles) >= 10:\n raise RaffleError(\"You cannot add more than 10 deputy roles.\")\n self.deputy_roles.append(obj)\n else:\n raise RaffleError(\"Invalid obj type.\")\n\n await self.save()\n\n async def remove_deputy(self, obj: Union[discord.Member, discord.Role]) -> None:\n \"\"\"\n Remove a deputy from the raffle.\n\n Parameters\n ----------\n obj: 
Union[discord.Member, discord.Role]\n The instance of deputy member or role to be removed.\n \"\"\"\n if isinstance(obj, discord.Member):\n if obj not in self.deputy_members:\n raise RaffleError(\"That member is not a deputy.\")\n self.deputy_members.remove(obj)\n elif isinstance(obj, discord.Role):\n if obj not in self.deputy_roles:\n raise RaffleError(\"That role is not a deputy.\")\n self.deputy_roles.remove(obj)\n else:\n raise RaffleError(\"Invalid obj type.\")\n\n await self.save()\n\n async def add_tickets(self, member: discord.Member, num_tickets: int) -> None:\n \"\"\"\n Add tickets to a member.\n\n Parameters\n ----------\n member: discord.Member\n The instance of the member.\n num_tickets: int\n The number of tickets to add.\n \"\"\"\n if member in self.tickets:\n self.tickets[member] += num_tickets\n else:\n self.tickets[member] = num_tickets\n\n await self.save()\n\n async def remove_tickets(self, member: discord.Member, num_tickets: int) -> None:\n \"\"\"\n Remove tickets from a member.\n\n Parameters\n ----------\n member: discord.Member\n The instance of the member.\n num_tickets: int\n The number of tickets to remove.\n \"\"\"\n if member in self.tickets:\n self.tickets[member] -= num_tickets\n if self.tickets[member] <= 0:\n del self.tickets[member]\n\n await self.save()\n else:\n raise RaffleError(\n f\"That member does not have any tickets in {self.name} raffle.\"\n )\n\n async def save(self) -> None:\n \"\"\"\n Update raffle attributes in the database.\n \"\"\"\n query = \"\"\"\n INSERT INTO raffles (guild, name, winner, deputy_roles, deputy_members, tickets)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (guild, name)\n DO UPDATE SET winner = EXCLUDED.winner, deputy_roles = EXCLUDED.deputy_roles,\n deputy_members = EXCLUDED.deputy_members, tickets = EXCLUDED.tickets;\n \"\"\"\n await self.pool.execute(\n query,\n self.guild.id,\n self.name,\n self.winner.id if self.winner else None,\n [role.id for role in self.deputy_roles],\n [member.id for member in self.deputy_members],\n {\n str(member.id): num_tickets\n for member, num_tickets in self.tickets.items()\n },\n )\n\n async def delete(self):\n \"\"\"\n Delete the raffle from the database.\n \"\"\"\n query = \"\"\"DELETE FROM raffles WHERE guild = $1 AND name = $2\"\"\"\n await self.pool.execute(query, self.guild.id, self.name)"
},
{
"identifier": "GIVEAWAY_EMOJI",
"path": "utils/constants.py",
"snippet": "GIVEAWAY_EMOJI = \"<:GiftifyTada:1098640605065777313>\""
},
{
"identifier": "MONEY_EMOJI",
"path": "utils/constants.py",
"snippet": "MONEY_EMOJI = \"<:GiftifyMoney:1122076961422975059>\""
},
{
"identifier": "BaseButtonPaginator",
"path": "utils/paginator.py",
"snippet": "class BaseButtonPaginator(Generic[T], discord.ui.View, abc.ABC):\n \"\"\"The base implementation of a button paginator. This class should be inherited\n then the custom instance defined.\n\n Parameters\n ----------\n entries: List[Any]\n The entries to paginate.\n per_page: int\n The amount of entries to show per page.\n clamp_pages: bool\n Whether to clamp the pages to the max and min page. This means that when the user\n reaches the max page, it will go back to the first page. Likewise, when the user\n reaches the first page, it will go back to the last page.\n target: Optional[Union[discord.Interaction, commands.Context]]\n The target interaction or context to use for the paginator. This is used to\n ensure that the user invoking the paginator is the same user that is interacting\n with the paginator.\n If this is ``None`` then the interaction check will always return True.\n \"\"\"\n\n def __init__(\n self,\n *,\n entries: List[T],\n per_page: int = 6,\n clamp_pages: bool = True,\n target: Optional[TargetType] = None,\n extras: Optional[Dict[Any, Any]] = None,\n ) -> None:\n super().__init__(timeout=180)\n self.entries: List[T] = entries\n self.per_page: int = per_page\n self.clamp_pages: bool = clamp_pages\n\n self.target: Optional[TargetType] = target\n self.extras = extras\n self.author: Optional[Union[discord.User, discord.Member]] = target and (\n target.user if isinstance(target, discord.Interaction) else target.author\n )\n self.bot: Optional[Giftify] = target and (\n target.client if isinstance(target, discord.Interaction) else target.bot\n )\n\n self._current_page_index = 0\n self.pages = [\n entries[i : i + per_page] for i in range(0, len(entries), per_page)\n ]\n\n @property\n def max_page(self) -> int:\n \"\"\"The max page count for this paginator.\"\"\"\n return len(self.pages)\n\n @property\n def min_page(self) -> int:\n \"\"\"The min page count for this paginator.\"\"\"\n return 1\n\n @property\n def current_page(self) -> int:\n \"\"\"The current page the user is on.\"\"\"\n return self._current_page_index + 1\n\n @property\n def total_pages(self) -> int:\n \"\"\"Returns the total amount of pages.\"\"\"\n return len(self.pages)\n\n @abc.abstractmethod\n def format_page(self, entries: List[T], /) -> discord.Embed:\n \"\"\"\n Used to make the embed that the user sees. This can be a coroutine or a regular\n function. This must be overwritten by the subclass.\n Parameters\n ----------\n entries: List[Any]\n A list of entries for the current page.\n Returns\n -------\n discord.Embed\n The embed for this page.\n \"\"\"\n raise NotImplementedError(\"Subclass did not overwrite format_page coro.\")\n\n async def embed(self) -> discord.Embed:\n \"\"\"\n A helper function to get the embed for the current page.\n Returns\n -------\n discord.Embed\n The embed for the current page.\n \"\"\"\n return await discord.utils.maybe_coroutine(\n self.format_page, self.pages[self._current_page_index]\n )\n\n async def interaction_check(self, interaction: Interaction, /) -> Optional[bool]:\n \"\"\"\n The base interaction check for the given view.\n This will always return ``True`` if the target is ``None``, otherwise it will check\n that the user invoking the paginator is the same user that is interacting with the\n paginator.\n Parameters\n ----------\n interaction: discord.Interaction\n The interaction to check.\n Returns\n -------\n Optional[bool]\n The result of the interaction check. 
If this returns ``None`` then the interaction\n was responded to with an error message to the user.\n \"\"\"\n if self.target is None:\n return True\n\n assert self.author\n\n # Ensure this is the correct invoker\n if self.author.id != interaction.user.id:\n return await interaction.response.send_message(\n \"Hey, this isn't yours!\", ephemeral=True\n )\n\n # Ensure they invoke it in the correct channel.\n if (\n self.target.channel\n and interaction.channel\n and self.target.channel.id != interaction.channel.id\n ):\n return await interaction.response.send_message(\n \"Hey, this isn't in the right channel!\", ephemeral=True\n )\n\n return True\n\n def _switch_page(self, count: int, /) -> None:\n self._current_page_index += count\n\n if self.clamp_pages:\n if count < 0: # Going down\n if self._current_page_index < 0:\n self._current_page_index = self.max_page - 1\n elif count > 0: # Going up\n if self._current_page_index > self.max_page - 1: # - 1 for indexing\n self._current_page_index = 0\n\n return\n\n @discord.ui.button(emoji=ARROW_BACK_EMOJI)\n async def on_arrow_backward(\n self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]\n ) -> discord.InteractionMessage:\n \"\"\"\n The button to represent going backwards a page.\n Parameters\n ----------\n interaction: discord.Interaction\n The interaction created from the user invoking the button.\n button: discord.ui.Button\n The button that was pressed.\n \"\"\"\n await interaction.response.defer()\n\n self._switch_page(-1)\n\n embed = await self.embed()\n return await interaction.edit_original_response(embed=embed)\n\n @discord.ui.button(emoji=STOP_EMOJI)\n async def on_stop(\n self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]\n ) -> discord.InteractionMessage:\n \"\"\"\n The button to represent stopping the paginator. This will disable all children\n to the view then edit the original message with the updated view.\n\n Parameters\n ----------\n interaction: discord.Interaction\n The interaction created from the user invoking the button.\n button: discord.ui.Button\n The button that was pressed.\n \"\"\"\n await interaction.response.defer()\n\n for child in self.children:\n child.disabled = True # type: ignore\n\n self.stop()\n\n return await interaction.edit_original_response(view=self)\n\n @discord.ui.button(emoji=ARROW_EMOJI)\n async def on_arrow_forward(\n self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]\n ) -> discord.InteractionMessage:\n \"\"\"\n The button to represent going forward a page.\n Parameters\n ----------\n interaction: discord.Interaction\n The interaction created from the user invoking the button.\n button: discord.ui.Button\n The button that was pressed.\n \"\"\"\n await interaction.response.defer()\n\n self._switch_page(1)\n\n embed = await self.embed()\n return await interaction.edit_original_response(embed=embed)"
},
{
"identifier": "MentionablesTransformer",
"path": "utils/transformers.py",
"snippet": "class MentionablesTransformer(app_commands.Transformer):\r\n async def transform(\r\n self, interaction: Interaction, value: str\r\n ) -> List[Union[discord.Member, discord.Role]]:\r\n mentionables: List[Union[discord.Member, discord.Role]] = []\r\n\r\n ctx = await commands.Context.from_interaction(interaction)\r\n\r\n for mentionable_string in value.split():\r\n # Better way is to use commands.run_converters but we can't use it here.\r\n try:\r\n mentionable = await commands.RoleConverter().convert(\r\n ctx, mentionable_string.strip()\r\n )\r\n except commands.RoleNotFound:\r\n pass\r\n else:\r\n if mentionable_string == \"@everyone\":\r\n raise InvalidRolesPassed(\r\n f\"{mentionable_string!r} is not a valid member or role.\"\r\n )\r\n mentionables.append(mentionable)\r\n continue\r\n\r\n try:\r\n mentionable = await commands.MemberConverter().convert(\r\n ctx, mentionable_string.strip()\r\n )\r\n except commands.MemberNotFound:\r\n raise InvalidMentionablesPassed(\r\n f\"{mentionable_string!r} is not a valid member or role.\"\r\n )\r\n\r\n mentionables.append(mentionable)\r\n\r\n return mentionables\r"
},
{
"identifier": "RaffleTransformer",
"path": "utils/transformers.py",
"snippet": "class RaffleTransformer(app_commands.Transformer):\r\n async def transform(self, interaction: Interaction, value: str) -> Raffle:\r\n assert interaction.guild is not None\r\n\r\n raffle = await interaction.client.fetch_raffle(interaction.guild, value)\r\n if not raffle:\r\n raise InvalidRaffle(\r\n f\"The raffle of name {value} does not exist!\",\r\n )\r\n\r\n return raffle\r\n\r\n async def autocomplete(\r\n self,\r\n interaction: Interaction,\r\n current: str,\r\n ) -> List[app_commands.Choice[str]]:\r\n assert interaction.guild is not None\r\n\r\n return [\r\n app_commands.Choice(name=raffle.name, value=raffle.name)\r\n for raffle in await interaction.client.fetch_raffles(interaction.guild)\r\n if current.lower() in raffle.name.lower()\r\n ]\r"
},
{
"identifier": "Interaction",
"path": "utils/tree.py",
"snippet": "class CommandTree(app_commands.CommandTree):\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r"
}
] | from typing import List, Optional, Tuple, Union
from discord import app_commands
from discord.app_commands import Range, Transform
from discord.ext import commands
from bot import Giftify
from models.raffles import Raffle
from utils.constants import GIVEAWAY_EMOJI, MONEY_EMOJI
from utils.paginator import BaseButtonPaginator
from utils.transformers import MentionablesTransformer, RaffleTransformer
from utils.tree import Interaction
import discord | 6,094 |
class RafflesPaginator(BaseButtonPaginator[Raffle]):
async def format_page(self, raffles: List[Raffle], /) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = "The raffles in this guild are:\n\n"
embed = discord.Embed(
title=f"{MONEY_EMOJI} {extras['guild'].name}'s Raffles",
description=description,
color=self.bot.colour,
)
for i, raffle in enumerate(raffles):
embed.add_field(
name=f"`{i + 1}.` {raffle.name}",
value=(
f"Deputy Roles: {', '.join(role.mention for role in raffle.deputy_roles)}\n"
f"Deputy Members: {', '.join(member.mention for member in raffle.deputy_members)}\n"
f"Winner: {raffle.winner.mention if raffle.winner else None}\n"
f"Total Tickets: {sum(raffle.tickets.values())}\n"
),
inline=False,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class TicketsLeaderboardPaginator(BaseButtonPaginator[Tuple[discord.Member, int]]):
async def format_page(
self, tickets: List[Tuple[discord.Member, int]], /
) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = f"The tickets of {extras['name']} raffle are:\n\n"
for i, member_tickets in enumerate(tickets):
description += (
f"`{i + 1}.` {member_tickets[0].mention} - **{member_tickets[1]:,}**\n"
)
embed = discord.Embed(
title=f"{MONEY_EMOJI} {extras['name'].title()} Raffle",
description=description,
color=self.bot.colour,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class RaffleBase(commands.GroupCog):
"""Cog containing admin commands for raffle management."""
bot: Giftify
@app_commands.command(name="create")
@app_commands.describe(
name="The unique name of the raffle.",
deputies="The list of members or roles who can manage the raffle.",
)
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id))
async def raffle_create(
self,
|
class RafflesPaginator(BaseButtonPaginator[Raffle]):
async def format_page(self, raffles: List[Raffle], /) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = "The raffles in this guild are:\n\n"
embed = discord.Embed(
title=f"{MONEY_EMOJI} {extras['guild'].name}'s Raffles",
description=description,
color=self.bot.colour,
)
for i, raffle in enumerate(raffles):
embed.add_field(
name=f"`{i + 1}.` {raffle.name}",
value=(
f"Deputy Roles: {', '.join(role.mention for role in raffle.deputy_roles)}\n"
f"Deputy Members: {', '.join(member.mention for member in raffle.deputy_members)}\n"
f"Winner: {raffle.winner.mention if raffle.winner else None}\n"
f"Total Tickets: {sum(raffle.tickets.values())}\n"
),
inline=False,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class TicketsLeaderboardPaginator(BaseButtonPaginator[Tuple[discord.Member, int]]):
async def format_page(
self, tickets: List[Tuple[discord.Member, int]], /
) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = f"The tickets of {extras['name']} raffle are:\n\n"
for i, member_tickets in enumerate(tickets):
description += (
f"`{i + 1}.` {member_tickets[0].mention} - **{member_tickets[1]:,}**\n"
)
embed = discord.Embed(
title=f"{MONEY_EMOJI} {extras['name'].title()} Raffle",
description=description,
color=self.bot.colour,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class RaffleBase(commands.GroupCog):
"""Cog containing admin commands for raffle management."""
bot: Giftify
@app_commands.command(name="create")
@app_commands.describe(
name="The unique name of the raffle.",
deputies="The list of members or roles who can manage the raffle.",
)
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id))
async def raffle_create(
self, | interaction: Interaction, | 7 | 2023-11-09 15:00:15+00:00 | 8k |
Zjy0401/CoCoFormer | conditional_generate.py | [
{
"identifier": "parse_generate_args",
"path": "utilities/argument_funcs.py",
"snippet": "def parse_generate_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-midi_root\", type=str, default=\"./dataset/dataset/JSF\", help=\"Midi file to prime the generator with\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./generate\", help=\"Folder to write unconditional generated midi to\")\n parser.add_argument(\"-conditional_output_dir\", type=str, default=\"./generate\", help=\"conditional output dir\")\n parser.add_argument(\"-primer_file\", type=str, default=None, help=\"File path or integer index to the evaluation dataset. Default is to select a random index.\")\n parser.add_argument(\"--gpu\", default=[0], nargs='+', type=int, help=\"For Multi-GPUs generate\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-target_seq_length\", type=int, default=2048, help=\"Target length you'd like the midi to be\")\n parser.add_argument(\"-num_prime\", type=int, default=256, help=\"Amount of messages to prime the generator with\")\n parser.add_argument(\"-model_weights\", type=str, default=\"./baseline_loss3_CBSATBoutput_0.4_0.2_1/weights/epoch_0040.pickle\",\n help=\"Pickled model weights file saved with torch.save and model.state_dict()\")\n parser.add_argument(\"-beam\", type=int, default=0, help=\"Beam search k. 0 for random probability sample and 1 for greedy\")\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n return parser.parse_args()"
},
{
"identifier": "print_generate_args",
"path": "utilities/argument_funcs.py",
"snippet": "def print_generate_args(args):\n\n print(SEPERATOR)\n print(\"midi_root:\", args.midi_root)\n print(\"output_dir:\", args.output_dir)\n print(\"primer_file:\", args.primer_file)\n print(\"force_cpu:\", args.force_cpu)\n print(\"\")\n print(\"target_seq_length:\", args.target_seq_length)\n print(\"num_prime:\", args.num_prime)\n print(\"model_weights:\", args.model_weights)\n print(\"beam:\", args.beam)\n print(\"\")\n print(\"rpr:\", args.rpr)\n print(\"max_sequence:\", args.max_sequence)\n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(SEPERATOR)\n print(\"\")"
},
{
"identifier": "CoCoformer",
"path": "model/CoCoFormer.py",
"snippet": "class CoCoformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(CoCoformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.c_max_seq = c_max_seq\n self.b_max_seq = b_max_seq\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n\n # past layer of chord\n self.cpast_layer_dmodel = d_model\n self.cpast_layer_nhead = 8\n self.cpast_dim_forward = 256\n self.cpast_layer_max_seq = 256\n self.cpast_layer_nlayers = 1\n\n # past layer of beats\n self.bpast_layer_dmodel = d_model\n self.bpast_layer_nhead = 8\n self.bpast_dim_forward = 256\n self.bpast_layer_max_seq = 1024\n self.bpast_layer_nlayers = 1\n\n # Input embedding\n self.n_embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n self.c_embedding = nn.Embedding(VOCAB_SIZE, self.cpast_layer_dmodel)\n self.b_embedding = nn.Embedding(VOCAB_SIZE, self.bpast_layer_dmodel)\n # Positional encoding\n self.n_positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n self.c_positional_encoding = PositionalEncoding(self.cpast_layer_dmodel, self.dropout, self.cpast_layer_max_seq)\n self.b_positional_encoding = PositionalEncoding(self.bpast_layer_dmodel, self.dropout, self.bpast_layer_max_seq)\n\n # Base transformer\n if not self.rpr:\n # To make a decoder-only transformer we need to use masked encoder layers\n # Dummy decoder to essentially just return the encoder output\n encoder_norm = LayerNorm(self.d_model)\n encoder_past_layer = TransformerEncoderPastLayer(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n self.d_model, self.nhead,\n self.d_ff, self.dropout)\n encoder_layer = TransformerEncoderLayer(self.d_model, self.nhead, self.d_ff, self.dropout)\n encoder = TransformerEncoder(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq, self.c_max_seq,\n self.b_max_seq, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_encoder=encoder, custom_decoder=self.dummy\n )\n # RPR Transformer\n elif self.rpr:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout,\n er_len=self.max_seq)\n encoder_past_layer = TransformerEncoderLayerRPR_(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n self.d_model, self.nhead,\n self.d_ff, self.dropout, er_len=self.max_seq)\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq,\n self.c_max_seq, self.b_max_seq, encoder_norm)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n\n # Final output is a softmaxed linear layer\n # TODO: verify the size of linear\n self.Norm1 = 
nn.LayerNorm(1024)\n self.ReLU = nn.ReLU()\n self.Norm2 = nn.LayerNorm(181)\n self.Dropout = nn.Dropout(dropout)\n self.transLinear = nn.Linear(256, 256)\n self.Wout1 = nn.Linear(self.d_model, 1024)\n self.Wout2 = nn.Linear(1024, 1024)\n self.Wout3 = nn.Linear(1024, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n def _reset_parameters(self):\n r\"\"\"Initiate parameters in the transformer model.\"\"\"\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n # forward\n def forward(self, x1, x2, x3, mask=True):\n\n args = parse_train_args()\n # for pure-Transformer:\n # Transformer module:\n if mask is True:\n if args.gpu[0] != -1:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cuda(device=args.gpu[0])\n else:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cpu()\n else:\n mask = None\n # Input shape is (max_seq, batch_size, d_model)\n x_n = self.n_embedding(x1)\n x_n = x_n.permute(1, 0, 2)\n x_n = self.n_positional_encoding(x_n)\n\n x_c = self.c_embedding(x2)\n x_c = x_c.permute(1, 0, 2)\n x_c = self.c_positional_encoding(x_c)\n\n x_b = self.b_embedding(x3)\n x_b = x_b.permute(1, 0, 2)\n x_b = self.b_positional_encoding(x_b)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=torch.cat((x_n, x_c, x_b), dim=0), tgt=x_n,\n src_mask=mask)\n # x_out = self.transformer(src=x_transformer, tgt=x_transformer, src_mask=mask)\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n # concat\n # x_concat = torch.cat([x_out, x_out2], dim=1)\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout1(x_out))))\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout2(y))))\n y = self.Wout3(y)\n # y = self.Wout2(y)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y\n\n # unconditional generate\n def generate(self, primer=None, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n\n print(\"Generating sequence of max length:\", target_seq_length)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n while cur_i < target_seq_length:\n # gen_seq_batch = gen_seq.clone()\n y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n\n # Let the transformer decide to end if it wants to\n # if next_token == TOKEN_END:\n # print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n # break\n\n cur_i += 1\n if cur_i % 50 == 0:\n print(cur_i, \"/\", target_seq_length)\n\n return gen_seq[:, :cur_i]\n\n # conditional generate\n def conditional_generate(self, beats, 
chord, seq, c, bs, ba, bt, bb, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n chord = torch.tensor(chord, device=get_device()).unsqueeze(0)\n beats = torch.tensor(beats, device=get_device()).unsqueeze(0)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n primer = torch.tensor([c[0], bs[0], seq[0], ba[0]])\n primer_num = 1 # decide key to add\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n # first input: C B N B\n cur_i_n = 1\n cur_i_b = 2\n cur_i_c = 1\n check_error = 0\n pbar = tqdm(total=len(seq)*9)\n while cur_i < target_seq_length:\n a = gen_seq[..., :cur_i].cpu().numpy()\n # gen_seq_batch = gen_seq.clone()\n # print(\"input:\", gen_seq[..., :cur_i], chord[..., :cur_i_c], beats[..., :cur_i_b])\n y = self.softmax(self.forward(gen_seq[..., :cur_i], chord[..., :cur_i_c],\n beats[..., :cur_i_b]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n # check for y\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n if check_error > 256:\n print(\"error! regenerate!\")\n return False\n # next token is the next token\n if cur_i % 9 == 1: # token is chord, next token must be beats\n if not 178 < next_token < 191: # if it is not beat\n check_error += 1\n continue\n if cur_i % 9 in [2, 4, 6, 8]: # this token must be beat, next token must be note\n if not next_token < 129: # if it is not note\n check_error += 1\n continue\n else: # this token must be note, next token must be chord or beat\n if not 128 < next_token < 191: # if it is chord or beat\n check_error += 1\n continue\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n cur_i += 1\n pbar.update(1)\n cur_i_n += 1\n if cur_i % 9 == 0 and primer_num < len(seq):\n # add C B_S N_S B_A\n gen_seq[:, cur_i] = chord.squeeze()[primer_num]\n gen_seq[:, cur_i+1] = torch.tensor(bs[primer_num], device=get_device())\n gen_seq[:, cur_i+2] = torch.tensor(seq[primer_num], device=get_device())\n gen_seq[:, cur_i+3] = torch.tensor(ba[primer_num], device=get_device())\n primer_num += 1\n cur_i += 4\n pbar.update(4)\n cur_i_n += 1\n cur_i_b += 2\n cur_i_c += 1\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if cur_i % 9 != 0 and cur_i % 9 != 4 and primer_num < len(seq) + 1:\n # add B\n gen_seq[:, cur_i] = beats.squeeze()[cur_i_b]\n cur_i_b += 1\n cur_i_n += 1\n cur_i += 1\n pbar.update(1)\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if primer_num == len(seq) and cur_i == len(seq) * 9:\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n # print(cur_i, \"/\", target_seq_length)\n\n print(\"all errors:%d\" % check_error)\n return gen_seq[:, :cur_i]"
},
{
"identifier": "create_jsf_datasets",
"path": "dataset/jsf.py",
"snippet": "def create_jsf_datasets(dataset_root, max_seq, random_seq=True):\n\n train_root = os.path.join(dataset_root, \"train\")\n # val_root = os.path.join(dataset_root, \"val\")\n test_root = os.path.join(dataset_root, \"test\")\n\n train_dataset = MultiJSFDataset(train_root, max_seq, random_seq)\n # val_dataset = JSFDataset(val_root, max_seq, random_seq)\n test_dataset = MultiJSFDataset(test_root, max_seq, random_seq)\n\n return train_dataset, test_dataset"
},
{
"identifier": "compute_jsf_accuracy",
"path": "dataset/jsf.py",
"snippet": "def compute_jsf_accuracy(out, tgt):\n\n softmax = nn.Softmax(dim=-1)\n out = torch.argmax(softmax(out), dim=-1)\n\n # test for bug:\n # out = np.array(out.cpu())\n # tgt = np.array(tgt.cpu())\n # only calculate note:\n\n # out = out[:, :2048].flatten()\n # tgt = tgt[:, :2048].flatten()\n\n out = out.flatten()\n tgt = tgt.flatten()\n\n mask = (tgt != TOKEN_PAD)\n\n out = out[mask]\n tgt = tgt[mask]\n\n # Empty\n if (len(tgt) == 0):\n return 1.0\n\n num_right = (out == tgt)\n num_right = torch.sum(num_right).type(TORCH_FLOAT)\n\n acc = num_right / len(tgt)\n\n return acc"
},
{
"identifier": "process_midi",
"path": "dataset/jsf.py",
"snippet": "def process_midi(raw_mid, max_seq, random_seq):\n\n x = torch.full((max_seq,), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())\n tgt = torch.full((max_seq,), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())\n\n raw_len = len(raw_mid)\n full_seq = max_seq + 1 # Performing seq2seq\n\n if raw_len == 0:\n return x, tgt\n\n if raw_len < full_seq:\n x[:raw_len] = raw_mid\n tgt[:raw_len - 1] = raw_mid[1:]\n tgt[raw_len - 1] = TOKEN_END\n else:\n # Randomly selecting a range\n if random_seq:\n end_range = raw_len - full_seq\n start = random.randint(SEQUENCE_START, end_range)\n\n # Always taking from the start to as far as we can\n else:\n start = SEQUENCE_START\n\n end = start + full_seq\n\n data = raw_mid[start:end]\n\n x = data[:max_seq]\n tgt = data[1:full_seq]\n\n # print(\"x:\",x)\n # print(\"tgt:\",tgt)\n\n return x, tgt"
},
{
"identifier": "get_device",
"path": "utilities/device.py",
"snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE"
},
{
"identifier": "use_cuda",
"path": "utilities/device.py",
"snippet": "def use_cuda(cuda_bool):\n\n global USE_CUDA\n USE_CUDA = cuda_bool"
},
{
"identifier": "parse_generate_args",
"path": "utilities/argument_funcs.py",
"snippet": "def parse_generate_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-midi_root\", type=str, default=\"./dataset/dataset/JSF\", help=\"Midi file to prime the generator with\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./generate\", help=\"Folder to write unconditional generated midi to\")\n parser.add_argument(\"-conditional_output_dir\", type=str, default=\"./generate\", help=\"conditional output dir\")\n parser.add_argument(\"-primer_file\", type=str, default=None, help=\"File path or integer index to the evaluation dataset. Default is to select a random index.\")\n parser.add_argument(\"--gpu\", default=[0], nargs='+', type=int, help=\"For Multi-GPUs generate\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-target_seq_length\", type=int, default=2048, help=\"Target length you'd like the midi to be\")\n parser.add_argument(\"-num_prime\", type=int, default=256, help=\"Amount of messages to prime the generator with\")\n parser.add_argument(\"-model_weights\", type=str, default=\"./baseline_loss3_CBSATBoutput_0.4_0.2_1/weights/epoch_0040.pickle\",\n help=\"Pickled model weights file saved with torch.save and model.state_dict()\")\n parser.add_argument(\"-beam\", type=int, default=0, help=\"Beam search k. 0 for random probability sample and 1 for greedy\")\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n return parser.parse_args()"
}
] | import torch
import torch.nn as nn
import os
import random
import math
import mido
import music21
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from utilities.argument_funcs import parse_generate_args, print_generate_args
from model.CoCoFormer import CoCoformer
from dataset.jsf import create_jsf_datasets, compute_jsf_accuracy, process_midi
from torch.utils.data import DataLoader
from torch.optim import Adam
from utilities.constants import *
from utilities.device import get_device, use_cuda
from utilities.argument_funcs import parse_generate_args | 6,803 | if note == 0:
note = i
track.append(mido.Message('note_on', note=note, velocity=96, time=0))
else:
track.append(mido.Message('note_off', note=note, velocity=96, time=time))
note = i
time = 120
track.append(mido.Message('note_on', note=note, velocity=96, time=0))
else:
time += 120
track.append(mido.Message('note_off', note=note, velocity=96, time=time))
def decode(index, file_path, single=False):
event = [word2event[i] for i in index]
print("decoding...")
s, a, t, b = [], [], [], []
if not single:
for key, value in enumerate(index):
if key % 9 == 2:
assert value < 129
s.append(value)
continue
if key % 9 == 4:
assert value < 129
a.append(value)
continue
if key % 9 == 6:
assert value < 129
t.append(value)
continue
if key % 9 == 8:
assert value < 129
b.append(value)
continue
mid = mido.MidiFile()
track_s = mido.MidiTrack()
mid.tracks.append(track_s)
create_track(track_s, s)
track_a = mido.MidiTrack()
mid.tracks.append(track_a)
create_track(track_a, a)
track_t = mido.MidiTrack()
mid.tracks.append(track_t)
create_track(track_t, t)
track_b = mido.MidiTrack()
mid.tracks.append(track_b)
create_track(track_b, b)
else:
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
create_track(track, seq)
mid.save(file_path)
plot_pianoroll(s, a, t, b)
print("midi save in:", file_path)
def plot_pianoroll(s, a, t, b):
'''
    plot pianoroll
    input : seqs of words
    output : an image of the pianoroll
'''
# build matrix
pianoroll = np.ones((180, 500))
def plot_track(seq):
for k, v in enumerate(seq):
pianoroll[v, k] = 0
def plot_main_track(seq):
for k, v in enumerate(seq):
pianoroll[v, k] = 2
plot_main_track(s)
plot_track(a)
plot_track(t)
plot_track(b)
pianoroll = np.flip(pianoroll[30:100], axis=0)
cmp = matplotlib.colors.ListedColormap(['g', 'w', 'b'])
plt.figure(1)
plt.imshow(pianoroll, cmap=cmp)
plt.show()
def conditional_generate(seq, chord, bs, ba, bt, bb):
assert len(seq) == len(chord) == len(bs) == len(ba) == len(bt) == len(bb)
beats = []
for i in range(len(bs)):
beats.extend((bs[i], ba[i], bt[i], bb[i]))
# deal with input: slice it < 128
input_note, input_chord, input_beats, input_bs, input_ba, input_bt, input_bb = [], [], [], [], [], [], []
loop = int(math.ceil(len(seq)/64))
for i in range(loop):
if i+64 <= len(seq):
input_note.append(seq[i*64: (i+1)*64])
input_chord.append(chord[i*64: (i+1)*64])
input_bs.append(bs[i*64: (i+1)*64])
input_ba.append(ba[i*64: (i+1)*64])
input_bt.append(bt[i*64: (i+1)*64])
input_bb.append(bb[i*64: (i+1)*64])
else:
input_note.append(seq[i:len(seq)])
input_chord.append(chord[i:len(seq)])
input_bs.append(bs[i:len(seq)])
input_ba.append(ba[i:len(seq)])
input_bt.append(bt[i:len(seq)])
input_bb.append(bb[i:len(seq)])
for p in range(len(input_bs)):
b = []
for q in range(len(input_bs[0])):
b.extend((input_bs[p][q], input_ba[p][q], input_bt[p][q], input_bb[p][q]))
input_beats.append(b)
args = parse_generate_args()
|
##### read word2event event2word
args = parse_generate_args()
f = open(args.word2event, 'rb')
word2event = pickle.load(f)
# reverse the vector event2word
event2word = {}
for key, val in word2event.items():
event2word[val] = key
def create_track(track, seq):
'''
create a midi track of seq
'''
note = 0
time = 120
for i in seq:
if note != int(i):
if note == 0:
note = i
track.append(mido.Message('note_on', note=note, velocity=96, time=0))
else:
track.append(mido.Message('note_off', note=note, velocity=96, time=time))
note = i
time = 120
track.append(mido.Message('note_on', note=note, velocity=96, time=0))
else:
time += 120
track.append(mido.Message('note_off', note=note, velocity=96, time=time))
def decode(index, file_path, single=False):
event = [word2event[i] for i in index]
print("decoding...")
s, a, t, b = [], [], [], []
if not single:
for key, value in enumerate(index):
if key % 9 == 2:
assert value < 129
s.append(value)
continue
if key % 9 == 4:
assert value < 129
a.append(value)
continue
if key % 9 == 6:
assert value < 129
t.append(value)
continue
if key % 9 == 8:
assert value < 129
b.append(value)
continue
mid = mido.MidiFile()
track_s = mido.MidiTrack()
mid.tracks.append(track_s)
create_track(track_s, s)
track_a = mido.MidiTrack()
mid.tracks.append(track_a)
create_track(track_a, a)
track_t = mido.MidiTrack()
mid.tracks.append(track_t)
create_track(track_t, t)
track_b = mido.MidiTrack()
mid.tracks.append(track_b)
create_track(track_b, b)
else:
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
create_track(track, seq)
mid.save(file_path)
plot_pianoroll(s, a, t, b)
print("midi save in:", file_path)
def plot_pianoroll(s, a, t, b):
'''
    plot pianoroll
    input : seqs of words
    output : an image of the pianoroll
'''
# build matrix
pianoroll = np.ones((180, 500))
def plot_track(seq):
for k, v in enumerate(seq):
pianoroll[v, k] = 0
def plot_main_track(seq):
for k, v in enumerate(seq):
pianoroll[v, k] = 2
plot_main_track(s)
plot_track(a)
plot_track(t)
plot_track(b)
pianoroll = np.flip(pianoroll[30:100], axis=0)
cmp = matplotlib.colors.ListedColormap(['g', 'w', 'b'])
plt.figure(1)
plt.imshow(pianoroll, cmap=cmp)
plt.show()
def conditional_generate(seq, chord, bs, ba, bt, bb):
assert len(seq) == len(chord) == len(bs) == len(ba) == len(bt) == len(bb)
beats = []
for i in range(len(bs)):
beats.extend((bs[i], ba[i], bt[i], bb[i]))
# deal with input: slice it < 128
input_note, input_chord, input_beats, input_bs, input_ba, input_bt, input_bb = [], [], [], [], [], [], []
loop = int(math.ceil(len(seq)/64))
for i in range(loop):
if i+64 <= len(seq):
input_note.append(seq[i*64: (i+1)*64])
input_chord.append(chord[i*64: (i+1)*64])
input_bs.append(bs[i*64: (i+1)*64])
input_ba.append(ba[i*64: (i+1)*64])
input_bt.append(bt[i*64: (i+1)*64])
input_bb.append(bb[i*64: (i+1)*64])
else:
input_note.append(seq[i:len(seq)])
input_chord.append(chord[i:len(seq)])
input_bs.append(bs[i:len(seq)])
input_ba.append(ba[i:len(seq)])
input_bt.append(bt[i:len(seq)])
input_bb.append(bb[i:len(seq)])
for p in range(len(input_bs)):
b = []
for q in range(len(input_bs[0])):
b.extend((input_bs[p][q], input_ba[p][q], input_bt[p][q], input_bb[p][q]))
input_beats.append(b)
args = parse_generate_args() | print_generate_args(args) | 1 | 2023-11-01 08:33:08+00:00 | 8k |
emadeldeen24/ECGTransForm | trainer.py | [
{
"identifier": "ecgTransForm",
"path": "models.py",
"snippet": "class ecgTransForm(nn.Module):\r\n def __init__(self, configs, hparams):\r\n super(ecgTransForm, self).__init__()\r\n\r\n filter_sizes = [5, 9, 11]\r\n self.conv1 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[0],\r\n stride=configs.stride, bias=False, padding=(filter_sizes[0] // 2))\r\n self.conv2 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[1],\r\n stride=configs.stride, bias=False, padding=(filter_sizes[1] // 2))\r\n self.conv3 = nn.Conv1d(configs.input_channels, configs.mid_channels, kernel_size=filter_sizes[2],\r\n stride=configs.stride, bias=False, padding=(filter_sizes[2] // 2))\r\n\r\n self.bn = nn.BatchNorm1d(configs.mid_channels)\r\n self.relu = nn.ReLU()\r\n self.mp = nn.MaxPool1d(kernel_size=2, stride=2, padding=1)\r\n self.do = nn.Dropout(configs.dropout)\r\n\r\n\r\n self.conv_block2 = nn.Sequential(\r\n nn.Conv1d(configs.mid_channels, configs.mid_channels * 2, kernel_size=8, stride=1, bias=False, padding=4),\r\n nn.BatchNorm1d(configs.mid_channels * 2),\r\n nn.ReLU(),\r\n nn.MaxPool1d(kernel_size=2, stride=2, padding=1)\r\n )\r\n\r\n self.conv_block3 = nn.Sequential(\r\n nn.Conv1d(configs.mid_channels * 2, configs.final_out_channels, kernel_size=8, stride=1, bias=False,\r\n padding=4),\r\n nn.BatchNorm1d(configs.final_out_channels),\r\n nn.ReLU(),\r\n nn.MaxPool1d(kernel_size=2, stride=2, padding=1),\r\n )\r\n \r\n self.inplanes = 128\r\n self.crm = self._make_layer(SEBasicBlock, 128, 3)\r\n\r\n self.encoder_layer = nn.TransformerEncoderLayer(d_model=configs.trans_dim, nhead=configs.num_heads, batch_first=True)\r\n self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=3)\r\n\r\n self.aap = nn.AdaptiveAvgPool1d(1)\r\n self.clf = nn.Linear(hparams[\"feature_dim\"], configs.num_classes)\r\n\r\n def _make_layer(self, block, planes, blocks, stride=1): # makes residual SE block\r\n downsample = None\r\n if stride != 1 or self.inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv1d(self.inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm1d(planes * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(self.inplanes, planes, stride, downsample))\r\n self.inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.inplanes, planes))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x_in):\r\n\r\n # Multi-scale Convolutions\r\n x1 = self.conv1(x_in)\r\n x2 = self.conv2(x_in)\r\n x3 = self.conv3(x_in)\r\n\r\n x_concat = torch.mean(torch.stack([x1, x2, x3],2), 2)\r\n x_concat = self.do(self.mp(self.relu(self.bn(x_concat))))\r\n\r\n x = self.conv_block2(x_concat)\r\n x = self.conv_block3(x)\r\n\r\n # Channel Recalibration Module\r\n x = self.crm(x)\r\n\r\n # Bi-directional Transformer\r\n x1 = self.transformer_encoder(x)\r\n x2 = self.transformer_encoder(torch.flip(x,[2]))\r\n x = x1+x2\r\n\r\n x = self.aap(x)\r\n x_flat = x.reshape(x.shape[0], -1)\r\n x_out = self.clf(x_flat)\r\n return x_out\r"
},
{
"identifier": "data_generator",
"path": "dataloader.py",
"snippet": "def data_generator(data_path, data_type, hparams):\r\n # original\r\n train_dataset = torch.load(os.path.join(data_path, data_type, f\"train.pt\"))\r\n val_dataset = torch.load(os.path.join(data_path, data_type, f\"val.pt\"))\r\n test_dataset = torch.load(os.path.join(data_path, data_type, f\"test.pt\"))\r\n\r\n # Loading datasets\r\n train_dataset = Load_Dataset(train_dataset)\r\n val_dataset = Load_Dataset(val_dataset)\r\n test_dataset = Load_Dataset(test_dataset)\r\n\r\n cw = train_dataset.y_data.numpy().tolist()\r\n cw_dict = {}\r\n for i in range(len(np.unique(train_dataset.y_data.numpy()))):\r\n cw_dict[i] = cw.count(i)\r\n # print(cw_dict)\r\n\r\n # Dataloaders\r\n batch_size = hparams[\"batch_size\"]\r\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size,\r\n shuffle=True, drop_last=True, num_workers=0)\r\n val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch_size,\r\n shuffle=False, drop_last=True, num_workers=0)\r\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size,\r\n shuffle=False, drop_last=False, num_workers=0)\r\n return train_loader, val_loader, test_loader, cw_dict\r"
},
{
"identifier": "get_dataset_class",
"path": "configs/data_configs.py",
"snippet": "def get_dataset_class(dataset_name):\r\n \"\"\"Return the algorithm class with the given name.\"\"\"\r\n if dataset_name not in globals():\r\n raise NotImplementedError(\"Dataset not found: {}\".format(dataset_name))\r\n return globals()[dataset_name]\r"
},
{
"identifier": "get_hparams_class",
"path": "configs/hparams.py",
"snippet": "def get_hparams_class(dataset_name):\r\n \"\"\"Return the algorithm class with the given name.\"\"\"\r\n if dataset_name not in globals():\r\n raise NotImplementedError(\"Algorithm not found: {}\".format(dataset_name))\r\n return globals()[dataset_name]\r"
},
{
"identifier": "AverageMeter",
"path": "utils.py",
"snippet": "class AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r"
},
{
"identifier": "to_device",
"path": "utils.py",
"snippet": "def to_device(input, device):\r\n if torch.is_tensor(input):\r\n return input.to(device=device)\r\n elif isinstance(input, str):\r\n return input\r\n elif isinstance(input, collections.Mapping):\r\n return {k: to_device(sample, device=device) for k, sample in input.items()}\r\n elif isinstance(input, collections.Sequence):\r\n return [to_device(sample, device=device) for sample in input]\r\n else:\r\n raise TypeError(\"Input must contain tensor, dict or list, found {type(input)}\")\r"
},
{
"identifier": "_save_metrics",
"path": "utils.py",
"snippet": "def _save_metrics(pred_labels, true_labels, log_dir, home_path, classes_names):\r\n pred_labels = np.array(pred_labels).astype(int)\r\n true_labels = np.array(true_labels).astype(int)\r\n\r\n r = classification_report(true_labels, pred_labels, digits=6, output_dict=True)\r\n\r\n df = pd.DataFrame(r)\r\n accuracy = accuracy_score(true_labels, pred_labels)\r\n df[\"accuracy\"] = accuracy\r\n df = df * 100\r\n\r\n # save classification report\r\n file_name = \"classification_report.xlsx\"\r\n report_Save_path = os.path.join(home_path, log_dir, file_name)\r\n df.to_excel(report_Save_path)\r"
},
{
"identifier": "copy_Files",
"path": "utils.py",
"snippet": "def copy_Files(destination):\r\n destination_dir = os.path.join(destination, \"MODEL_BACKUP_FILES\")\r\n os.makedirs(destination_dir, exist_ok=True)\r\n copy(\"main.py\", os.path.join(destination_dir, \"main.py\"))\r\n copy(\"dataloader.py\", os.path.join(destination_dir, \"dataloader.py\"))\r\n copy(f\"models.py\", os.path.join(destination_dir, f\"models.py\"))\r\n copy(f\"configs/data_configs.py\", os.path.join(destination_dir, f\"data_configs.py\"))\r\n copy(f\"configs/hparams.py\", os.path.join(destination_dir, f\"hparams.py\"))\r\n copy(f\"trainer.py\", os.path.join(destination_dir, f\"trainer.py\"))\r\n copy(\"utils.py\", os.path.join(destination_dir, \"utils.py\"))\r"
},
{
"identifier": "_plot_umap",
"path": "utils.py",
"snippet": "def _plot_umap(model, data_loader, device, save_dir):\r\n import umap\r\n import umap.plot\r\n from matplotlib.colors import ListedColormap\r\n classes_names = ['N','S','V','F','Q']\r\n \r\n font = {'family' : 'Times New Roman',\r\n 'weight' : 'bold',\r\n 'size' : 17}\r\n plt.rc('font', **font)\r\n \r\n with torch.no_grad():\r\n # Source flow\r\n data = data_loader.dataset.x_data.float().to(device)\r\n labels = data_loader.dataset.y_data.view((-1)).long()\r\n out = model[0](data)\r\n features = model[1](out)\r\n\r\n\r\n if not os.path.exists(os.path.join(save_dir, \"umap_plots\")):\r\n os.mkdir(os.path.join(save_dir, \"umap_plots\"))\r\n \r\n #cmaps = plt.get_cmap('jet')\r\n model_reducer = umap.UMAP() #n_neighbors=3, min_dist=0.3, metric='correlation', random_state=42)\r\n embedding = model_reducer.fit_transform(features.detach().cpu().numpy())\r\n \r\n # Normalize the labels to [0, 1] for colormap\r\n norm_labels = labels / 4.0\r\n \r\n\r\n # Create a new colormap by extracting the first 5 colors from \"Paired\"\r\n paired = plt.cm.get_cmap('Paired', 12) # 12 distinct colors\r\n new_colors = [paired(0), paired(1), paired(2), paired(4), paired(6)] # Skip every second color, but take both from the first pair\r\n new_cmap = ListedColormap(new_colors)\r\n\r\n print(\"Plotting UMAP ...\")\r\n plt.figure(figsize=(16, 10))\r\n # scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, s=10, cmap='Spectral')\r\n scatter = plt.scatter(embedding[:, 0], embedding[:, 1], c=norm_labels, cmap=new_cmap, s=15)\r\n\r\n handles, _ = scatter.legend_elements(prop='colors')\r\n plt.legend(handles, classes_names, title=\"Classes\")\r\n file_name = \"umap_.png\"\r\n fig_save_name = os.path.join(save_dir, \"umap_plots\", file_name)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.savefig(fig_save_name, bbox_inches='tight')\r\n plt.close()\r"
},
{
"identifier": "fix_randomness",
"path": "utils.py",
"snippet": "def fix_randomness(SEED):\r\n random.seed(SEED)\r\n np.random.seed(SEED)\r\n torch.manual_seed(SEED)\r\n torch.cuda.manual_seed(SEED)\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = False\r"
},
{
"identifier": "starting_logs",
"path": "utils.py",
"snippet": "def starting_logs(data_type, exp_log_dir, seed_id):\r\n log_dir = os.path.join(exp_log_dir, \"_seed_\" + str(seed_id))\r\n os.makedirs(log_dir, exist_ok=True)\r\n log_file_name = os.path.join(log_dir, f\"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log\")\r\n logger = _logger(log_file_name)\r\n logger.debug(\"=\" * 45)\r\n logger.debug(f'Dataset: {data_type}')\r\n logger.debug(\"=\" * 45)\r\n logger.debug(f'Seed: {seed_id}')\r\n logger.debug(\"=\" * 45)\r\n return logger, log_dir\r"
},
{
"identifier": "save_checkpoint",
"path": "utils.py",
"snippet": "def save_checkpoint(home_path, model, dataset, dataset_configs, log_dir, hparams):\r\n save_dict = {\r\n \"dataset\": dataset,\r\n \"configs\": dataset_configs.__dict__,\r\n \"hparams\": dict(hparams),\r\n \"model\": model[0].state_dict(),\r\n \"clf\": model[1].state_dict()\r\n }\r\n # save classification report\r\n save_path = os.path.join(home_path, log_dir, \"checkpoint.pt\")\r\n\r\n torch.save(save_dict, save_path)\r"
},
{
"identifier": "_calc_metrics",
"path": "utils.py",
"snippet": "def _calc_metrics(pred_labels, true_labels, classes_names):\r\n pred_labels = np.array(pred_labels).astype(int)\r\n true_labels = np.array(true_labels).astype(int)\r\n\r\n r = classification_report(true_labels, pred_labels, target_names=classes_names, digits=6, output_dict=True)\r\n accuracy = accuracy_score(true_labels, pred_labels)\r\n\r\n return accuracy * 100, r[\"macro avg\"][\"f1-score\"] * 100\r"
}
] | import torch
import torch.nn.functional as F
import os
import collections
import numpy as np
import warnings
import sklearn.exceptions
from models import ecgTransForm
from dataloader import data_generator
from configs.data_configs import get_dataset_class
from configs.hparams import get_hparams_class
from utils import AverageMeter, to_device, _save_metrics, copy_Files, _plot_umap
from utils import fix_randomness, starting_logs, save_checkpoint, _calc_metrics
| 4,063 |
warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \
data_generator(self.data_path, data_type, self.hparams)
def calc_results_per_run(self):
acc, f1 = _calc_metrics(self.pred_labels, self.true_labels, self.dataset_configs.class_names)
return acc, f1
def train(self):
copy_Files(self.exp_log_dir) # save a copy of training files
self.metrics = {'accuracy': [], 'f1_score': []}
# fixing random seed
fix_randomness(int(self.seed_id))
# Logging
self.logger, self.scenario_log_dir = starting_logs(self.dataset, self.exp_log_dir, self.seed_id)
self.logger.debug(self.hparams)
# Load data
self.load_data(self.dataset)
model = ecgTransForm(configs=self.dataset_configs, hparams=self.hparams)
model.to(self.device)
# Average meters
loss_avg_meters = collections.defaultdict(lambda: AverageMeter())
self.optimizer = torch.optim.Adam(
model.parameters(),
lr=self.hparams["learning_rate"],
weight_decay=self.hparams["weight_decay"],
betas=(0.9, 0.99)
)
        self.cross_entropy = torch.nn.CrossEntropyLoss(weight=torch.tensor(np.array(list(self.cw_dict.values()))).float().to(self.device))
best_acc = 0
best_f1 = 0
# training..
for epoch in range(1, self.hparams["num_epochs"] + 1):
model.train()
for step, batches in enumerate(self.train_dl):
batches = to_device(batches, self.device)
data = batches['samples'].float()
labels = batches['labels'].long()
# ====== Source =====================
self.optimizer.zero_grad()
# Src original features
logits = model(data)
# Cross-Entropy loss
x_ent_loss = self.cross_entropy(logits, labels)
x_ent_loss.backward()
self.optimizer.step()
losses = {'Total_loss': x_ent_loss.item()}
for key, val in losses.items():
loss_avg_meters[key].update(val, self.hparams["batch_size"])
self.evaluate(model, self.val_dl)
tr_acc, tr_f1 = self.calc_results_per_run()
# logging
self.logger.debug(f'[Epoch : {epoch}/{self.hparams["num_epochs"]}]')
for key, val in loss_avg_meters.items():
self.logger.debug(f'{key}\t: {val.avg:2.4f}')
self.logger.debug(f'TRAIN: Acc:{tr_acc:2.4f} \t F1:{tr_f1:2.4f}')
# VALIDATION part
self.evaluate(model, self.val_dl)
ts_acc, ts_f1 = self.calc_results_per_run()
if ts_f1 > best_f1: # save best model based on best f1.
best_f1 = ts_f1
best_acc = ts_acc
save_checkpoint(self.exp_log_dir, model, self.dataset, self.dataset_configs, self.hparams, "best")
|
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \
data_generator(self.data_path, data_type, self.hparams)
def calc_results_per_run(self):
acc, f1 = _calc_metrics(self.pred_labels, self.true_labels, self.dataset_configs.class_names)
return acc, f1
def train(self):
copy_Files(self.exp_log_dir) # save a copy of training files
self.metrics = {'accuracy': [], 'f1_score': []}
# fixing random seed
fix_randomness(int(self.seed_id))
# Logging
self.logger, self.scenario_log_dir = starting_logs(self.dataset, self.exp_log_dir, self.seed_id)
self.logger.debug(self.hparams)
# Load data
self.load_data(self.dataset)
model = ecgTransForm(configs=self.dataset_configs, hparams=self.hparams)
model.to(self.device)
# Average meters
loss_avg_meters = collections.defaultdict(lambda: AverageMeter())
self.optimizer = torch.optim.Adam(
model.parameters(),
lr=self.hparams["learning_rate"],
weight_decay=self.hparams["weight_decay"],
betas=(0.9, 0.99)
)
        self.cross_entropy = torch.nn.CrossEntropyLoss(weight=torch.tensor(np.array(list(self.cw_dict.values()))).float().to(self.device))
best_acc = 0
best_f1 = 0
# training..
for epoch in range(1, self.hparams["num_epochs"] + 1):
model.train()
for step, batches in enumerate(self.train_dl):
batches = to_device(batches, self.device)
data = batches['samples'].float()
labels = batches['labels'].long()
# ====== Source =====================
self.optimizer.zero_grad()
# Src original features
logits = model(data)
# Cross-Entropy loss
x_ent_loss = self.cross_entropy(logits, labels)
x_ent_loss.backward()
self.optimizer.step()
losses = {'Total_loss': x_ent_loss.item()}
for key, val in losses.items():
loss_avg_meters[key].update(val, self.hparams["batch_size"])
self.evaluate(model, self.val_dl)
tr_acc, tr_f1 = self.calc_results_per_run()
# logging
self.logger.debug(f'[Epoch : {epoch}/{self.hparams["num_epochs"]}]')
for key, val in loss_avg_meters.items():
self.logger.debug(f'{key}\t: {val.avg:2.4f}')
self.logger.debug(f'TRAIN: Acc:{tr_acc:2.4f} \t F1:{tr_f1:2.4f}')
# VALIDATION part
self.evaluate(model, self.val_dl)
ts_acc, ts_f1 = self.calc_results_per_run()
if ts_f1 > best_f1: # save best model based on best f1.
best_f1 = ts_f1
best_acc = ts_acc
                save_checkpoint(self.exp_log_dir, model, self.dataset, self.dataset_configs, self.hparams, "best")
| _save_metrics(self.pred_labels, self.true_labels, self.exp_log_dir, | 6 | 2023-11-06 14:11:19+00:00 | 8k |
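The trainer above weights its cross-entropy loss with the per-class weights in cw_dict. A minimal, self-contained sketch of that construction, assuming a made-up weight mapping for the five heartbeat classes ['N', 'S', 'V', 'F', 'Q'] (the weight values and batch shapes are placeholders, not values from this record):

import torch

# Hypothetical class-weight mapping; the numbers are illustrative only.
cw_dict = {0: 1.0, 1: 9.3, 2: 3.1, 3: 27.5, 4: 1.2}

# dict.values() must be materialised as a list before building the tensor;
# a bare dict_values view becomes a 0-d object array that torch rejects.
weights = torch.tensor(list(cw_dict.values())).float()
criterion = torch.nn.CrossEntropyLoss(weight=weights)

logits = torch.randn(8, 5)           # batch of 8 predictions over 5 classes
labels = torch.randint(0, 5, (8,))   # ground-truth class indices
print(criterion(logits, labels).item())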
WMD-group/CrystalSpace | app.py | [
{
"identifier": "get_plotly_embedding",
"path": "visualize_app/visualize_embedding.py",
"snippet": "def get_plotly_embedding(\n df: pd.DataFrame = None,\n opacity: float = 0.2,\n **kwargs,\n) -> go.Figure:\n \"\"\"\n Plot the embedding of a dataframe with plotly.\n\n Args:\n df: dataframe with columns x, y, z, smact_allowed, mp_data.\n opacity: opacity of the markers. Default is 0.8.\n kwargs: additional keyword arguments.\n Returns:\n fig: plotly figure object.\n \"\"\"\n # check if the dataframe is empty\n if df is None:\n return go.Figure()\n\n fig = px.scatter_3d(\n df,\n x=\"x\",\n y=\"y\",\n z=\"z\",\n template=\"plotly_white\",\n color=\"label\",\n color_discrete_map={\n \"0\": \"#D9D9D9\",\n \"1\": \"rgba(34, 224, 0, 0.8)\",\n \"2\": \"rgba(255, 18, 1, 0.8)\",\n \"3\": \"rgba(0, 47, 255, 0.8)\",\n # \"0\": px.colors.qualitative.Vivid[-1], # \"#D9D9D9\"\n # \"1\": px.colors.qualitative.Vivid[0], # \"#22E000\",\n # \"2\": px.colors.qualitative.Vivid[1], # \"#FF1201\",\n # \"3\": px.colors.qualitative.Vivid[2], # \"#002FFF\",\n },\n opacity=opacity,\n hover_data=[\n \"formula\",\n ],\n )\n\n # update hovertemplate\n fig.update_traces(\n hovertemplate=\"<br>\".join(\n [\n \"formula: %{customdata[0]}\",\n ]\n )\n )\n\n # remove the background grid and axes and ticks and tick labels\n fig.update_layout(\n scene=dict(\n xaxis=dict(\n showticklabels=False,\n title=\"\",\n ),\n yaxis=dict(\n showticklabels=False,\n title=\"\",\n ),\n zaxis=dict(\n showticklabels=False,\n title=\"\",\n ),\n ),\n )\n\n # set title\n if \"title\" in kwargs:\n fig.update_layout(\n title=dict(\n text=kwargs[\"title\"],\n font=dict(size=20),\n x=0.5,\n y=0.95,\n xanchor=\"center\",\n yanchor=\"top\",\n )\n )\n\n # update the legend labels\n legend_label_map = {\n \"0\": \"Unlikely (False, False)\",\n \"1\": \"Interesting (False, True)\",\n \"2\": \"Missing (True, False)\",\n \"3\": \"Standard (True, True)\",\n }\n\n for trace in fig.data:\n trace.name = legend_label_map[trace.name]\n\n # update the marker\n\n fig.update_traces(\n marker=dict(\n size=5,\n # line=dict(width=0.5, color=\"Grey\"),\n ),\n selector=dict(mode=\"markers\"),\n )\n\n # update the legend title\n fig.update_layout(\n legend_title_text=\" click legend 👆 <br>(smact_allowed, mp_data)\",\n )\n return fig"
},
{
"identifier": "get_plotly_structure",
"path": "visualize_app/visualize_structure.py",
"snippet": "def get_plotly_structure(structure: Structure = None) -> go.Figure:\n \"\"\"\n Plot a pymatgen structure with its unit cell using plotly.\n Args:\n structure: pymatgen structure object.\n kwargs: additional keyword arguments.\n Returns:\n fig: plotly figure object.\n \"\"\"\n if structure is None:\n return px.scatter_3d()\n\n # Getting atomic positions and species using list comprehension\n positions = [site.coords for site in structure]\n species = [str(site.specie) for site in structure]\n\n # Getting atomic colors\n atomic_colors = [jmol_colors[Element(specie).Z] for specie in species]\n\n # Getting atomic radii\n # atomic_radii = [float(Element(specie).atomic_radius) for specie in species]\n\n # Extracting x, y, and z coordinates\n x, y, z = zip(*positions)\n\n # Getting lattice vectors\n a, b, c = structure.lattice.matrix\n\n # Define lines for the unit cell\n lines = [\n [[0, 0, 0], a],\n [[0, 0, 0], b],\n [[0, 0, 0], c],\n [a, a + b],\n [a, a + c],\n [b, b + a],\n [b, b + c],\n [c, c + a],\n [c, c + b],\n [a + b, a + b + c],\n [a + c, a + c + b],\n [b + c, b + c + a],\n ]\n\n # scatter atoms\n trace_atoms = go.Scatter3d(\n x=x,\n y=y,\n z=z,\n mode=\"markers\",\n text=species,\n hoverinfo=\"text\",\n marker=dict(\n symbol=\"circle\",\n sizemode=\"diameter\",\n color=atomic_colors,\n # size=[20 * r for r in atomic_radii],\n size=20,\n line=dict(color=\"black\", width=5),\n ),\n )\n\n # draw unit cell\n trace_lines = []\n for line in lines:\n x_values, y_values, z_values = zip(*line)\n trace_lines.append(\n go.Scatter3d(\n x=x_values,\n y=y_values,\n z=z_values,\n mode=\"lines\",\n line=dict(color=\"black\"),\n )\n )\n\n # remove the background grid\n layout = go.Layout(\n scene=dict(\n xaxis=dict(\n showticklabels=False,\n title=\"\",\n showgrid=False,\n zeroline=False,\n showline=False,\n visible=False,\n ),\n yaxis=dict(\n showticklabels=False,\n title=\"\",\n showgrid=False,\n zeroline=False,\n showline=False,\n visible=False,\n ),\n zaxis=dict(\n showticklabels=False,\n title=\"\",\n showgrid=False,\n zeroline=False,\n showline=False,\n visible=False,\n ),\n ),\n showlegend=False,\n )\n\n fig = go.Figure(data=[trace_atoms, *trace_lines], layout=layout)\n return fig"
},
{
"identifier": "fn_chemical_check",
"path": "visualize_app/utils.py",
"snippet": "def fn_chemical_check(\n df_embedding: pd.DataFrame, species_1: str, species_2: str\n) -> np.array:\n \"\"\"\n Check if the chemical system contains the specified species.\n\n Args:\n df_embedding (pd.DataFrame): Embedding dataframe.\n species_1 (str): Chemical species 1.\n species_2 (str): Chemical species 2.\n\n Returns:\n np.array: Boolean array for the chemical systems that contain the specified species.\n \"\"\"\n\n chemicals = np.array(df_embedding.index)\n\n # regular expression patterns\n pattern_1 = r\"{}(?:(?={})|(?![a-zA-Z]))\".format(species_1, species_2)\n pattern_2 = r\"{}(?:(?={})|(?![a-zA-Z]))\".format(species_2, species_1)\n # get the mask\n mask = np.array(\n [\n True\n if re.search(pattern_1, chemical)\n and re.search(pattern_2, chemical)\n else True\n if re.search(pattern_1, chemical) and species_2 == \"default\"\n else True\n if re.search(pattern_2, chemical) and species_1 == \"default\"\n else True\n if species_1 == \"default\" and species_2 == \"default\"\n else False\n for chemical in chemicals\n ]\n )\n\n return mask"
},
{
"identifier": "blank_fig",
"path": "visualize_app/utils.py",
"snippet": "def blank_fig():\n fig = go.Figure(go.Scatter(x=[], y=[]))\n fig.update_layout(template=None)\n fig.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)\n fig.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)\n\n return fig"
}
] | import os
import ase
import pandas as pd
import dash_bootstrap_components as dbc
from pathlib import Path
from fire import Fire
from pymatgen.core import Structure
from dash import Dash, html, Input, Output, dcc, dash_table, no_update
from visualize_app.visualize_embedding import get_plotly_embedding
from visualize_app.visualize_structure import get_plotly_structure
from visualize_app.utils import fn_chemical_check, blank_fig
| 3,834 |
html.Div(
id="table",
),
]
),
style={"border": "none"},
),
# set the footer
# add line
html.Hr(),
dbc.Row(
[
dbc.Col(
html.H6(
html.A(
"Created by Hyunsoo Park in the Materials Design Group (Imperial College London)",
href="https://github.com/wmd-group",
style={"color": "black"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 1
dbc.Col(
html.H6(
html.A(
"1. Composition generation using SMACT",
href="https://github.com/WMD-group/SMACT",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 2
dbc.Col(
html.H6(
html.A(
"2. Element embedding vectors from ElementEmbeddings",
href="https://github.com/WMD-group/ElementEmbeddings",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 3
dbc.Col(
html.H6(
html.A(
"3. Structure data from Materials Project",
href="https://materialsproject.org",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 4
dbc.Col(
html.H6(
html.A(
"4. Dimensionality reduction using scikit-learn",
href="https://scikit-learn.org/stable/",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
],
justify="start",
),
]
)
# set the callback for the scatter plot
@app.callback(
[
Output("method-name", "children"),
Output("3d-scatter-plot", "figure"),
],
Input("reduction-method-select", "value"),
Input("embedding-method-select", "value"),
Input("chemical-system-select-1", "value"),
Input("chemical-system-select-2", "value"),
)
def update_3d_scatter_plot(
reduction_method,
embedding_method,
chemical_system_1,
chemical_system_2,
):
# set the path to the embedding
path_embedding = Path(PARENT_DIR, "visualize_app/assets/reduced_embeddings_3d")
path_embedding = (
path_embedding / f"{reduction_method}_{embedding_method}_mean.pkl"
)
if not path_embedding.exists():
raise FileNotFoundError(f"Embedding file {path_embedding} does not exist.")
# read the embedding
df_embedding = pd.read_pickle(path_embedding)
df_embedding.columns = ["x", "y", "z"]
df_embedding["formula"] = df_embedding.index
# merge the total data with the embedding
df_plot = df_embedding.join(LABEL_DATA)
# check if the chemical system contains the specified species
|
PARENT_DIR = Path(os.path.dirname(__file__))
# load label data
LABEL_DATA = pd.read_pickle(PARENT_DIR / "visualize_app/assets/df_binary_label.pkl")
LABEL_DATA["label"] = LABEL_DATA["label"].astype(str)
# load materials project data
MP_DATA = pd.read_pickle(PARENT_DIR / "visualize_app/assets/df_binary_mp.pkl")
def main(
debug: bool = False,
host: str = "0.0.0.0",
port: int = 8050,
):
"""Visualize the embedding of binary compounds.
:param debug: Debug mode, defaults to False
:param host: host address, defaults to "0.0.0.0"
:param port: port number, defaults to 8050
"""
# initialize the app - incorporate a Dash Bootstrap theme
external_stylesheets = [dbc.themes.MINTY]
app = Dash(__name__, external_stylesheets=external_stylesheets)
# app layout
app.layout = dbc.Container(
[
# set the app title
dbc.Row(
[
html.H1(
"Crystal Space for Binary Compounds 🔮",
style={
"textAlign": "center",
"color": "black",
},
),
html.Hr(),
]
),
# set selector for methods
dbc.Row(
[
# set selector for dimension reduction method
dbc.Col(
dbc.Select(
id="reduction-method-select",
options=[
{"label": "t-SNE", "value": "tsne"},
{"label": "UMAP", "value": "umap"},
{"label": "PCA", "value": "pca"},
],
value="umap",
),
width=3,
),
# set selector for embedding method
dbc.Col(
dbc.Select(
id="embedding-method-select",
options=[
{"label": "magpie", "value": "magpie"},
{"label": "mat2vec", "value": "mat2vec"},
{"label": "megnet16", "value": "megnet16"},
{"label": "oliynyk", "value": "oliynyk"},
{"label": "skipatom", "value": "skipatom"},
{"label": "random_200", "value": "random_200"},
],
value="magpie",
),
width=3,
),
],
justify="start",
),
html.Br(),
# set selector for chemical systems
dbc.Row(
[
# set selector for chemical system 1
dbc.Col(
dbc.Select(
id="chemical-system-select-1",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 1", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
# set selector for chemical system 2
dbc.Col(
dbc.Select(
id="chemical-system-select-2",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 2", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
],
justify="start",
),
dcc.Store(id="embedding-data-store", data=None),
html.Br(),
# set scatter and crystal structure
dbc.Row(
[
# set the scatter plot
dbc.Col(
dbc.Card(
[
dbc.CardHeader(
html.H4(
"Crystal Space",
style={
"textAlign": "center",
"color": "black",
},
)
),
dbc.CardBody(
[
dcc.Markdown(
id="method-name",
children="",
style={
"textAlign": "center",
"color": "black",
"fontSize": 20,
},
),
dcc.Graph(
id="3d-scatter-plot",
figure=blank_fig(),
),
]
),
]
),
width=6,
),
# set the crystal structure
dbc.Col(
dbc.Card(
[
dbc.CardHeader(
html.H4(
"Crystal Structure",
style={
"textAlign": "center",
"color": "black",
},
)
),
dbc.CardBody(
[
# name of the crystal structure
dcc.Markdown(
id="crystal-structure-name",
children="Click a point on the scatter plot",
style={
"textAlign": "center",
"color": "black",
"fontSize": 20,
},
),
# graph
dcc.Graph(
id="crystal-structure",
figure=blank_fig(),
),
]
),
]
),
width=6,
),
],
justify="start",
),
html.Br(),
# set a table with properties
dbc.Card(
dbc.CardBody(
[
html.Div(
id="table",
),
]
),
style={"border": "none"},
),
# set the footer
# add line
html.Hr(),
dbc.Row(
[
dbc.Col(
html.H6(
html.A(
"Created by Hyunsoo Park in the Materials Design Group (Imperial College London)",
href="https://github.com/wmd-group",
style={"color": "black"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 1
dbc.Col(
html.H6(
html.A(
"1. Composition generation using SMACT",
href="https://github.com/WMD-group/SMACT",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 2
dbc.Col(
html.H6(
html.A(
"2. Element embedding vectors from ElementEmbeddings",
href="https://github.com/WMD-group/ElementEmbeddings",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 3
dbc.Col(
html.H6(
html.A(
"3. Structure data from Materials Project",
href="https://materialsproject.org",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 4
dbc.Col(
html.H6(
html.A(
"4. Dimensionality reduction using scikit-learn",
href="https://scikit-learn.org/stable/",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
],
justify="start",
),
]
)
# set the callback for the scatter plot
@app.callback(
[
Output("method-name", "children"),
Output("3d-scatter-plot", "figure"),
],
Input("reduction-method-select", "value"),
Input("embedding-method-select", "value"),
Input("chemical-system-select-1", "value"),
Input("chemical-system-select-2", "value"),
)
def update_3d_scatter_plot(
reduction_method,
embedding_method,
chemical_system_1,
chemical_system_2,
):
# set the path to the embedding
path_embedding = Path(PARENT_DIR, "visualize_app/assets/reduced_embeddings_3d")
path_embedding = (
path_embedding / f"{reduction_method}_{embedding_method}_mean.pkl"
)
if not path_embedding.exists():
raise FileNotFoundError(f"Embedding file {path_embedding} does not exist.")
# read the embedding
df_embedding = pd.read_pickle(path_embedding)
df_embedding.columns = ["x", "y", "z"]
df_embedding["formula"] = df_embedding.index
# merge the total data with the embedding
df_plot = df_embedding.join(LABEL_DATA)
        # check if the chemical system contains the specified species
| mask = fn_chemical_check(df_plot, chemical_system_1, chemical_system_2) | 2 | 2023-11-07 17:10:38+00:00 | 8k |
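The callback above stops right before calling fn_chemical_check, whose regex-based species filter appears in the context snippets. A simplified, self-contained sketch of that kind of element check follows; the formula strings are made-up examples and only the single-species case is covered, not the two-species logic of the original:

import re

def has_element(formula: str, symbol: str) -> bool:
    # Match the symbol only when it is not followed by a lowercase letter,
    # so searching for "S" does not hit the "S" inside "Se" or "Si".
    return re.search(r"{}(?![a-z])".format(symbol), formula) is not None

for formula in ["NaCl", "Na2Se", "SiO2"]:
    print(formula, has_element(formula, "Na"), has_element(formula, "S"))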
Infotrend-Inc/OpenAI_WebUI | OpenAI_WebUI.py | [
{
"identifier": "OAI_GPT",
"path": "OpenAI_GPT.py",
"snippet": "class OAI_GPT:\n def __init__(self, apikey, save_location, models_list):\n self.last_gpt_query = 'last_gpt_query'\n\n self.apikey = apikey\n self.save_location = save_location\n\n self.models_supported = models_list\n self.set_parameters(models_list)\n\n\n#####\n# https://platform.openai.com/docs/models/continuous-model-upgrades\n def set_parameters(self, models_list):\n models = {}\n model_help = \"\"\n\n all = {\n \"gpt-3.5-turbo\":\n {\n \"label\": \"Most capable GPT-3.5 model and optimized for chat. Will be updated with OpenAI's latest model iteration. For many basic tasks, the difference between GPT-4 and GPT-3.5 models is not significant. However, in more complex reasoning situations, GPT-4 is much more capable.\",\n \"max_token\": 4000,\n \"data\": \"Up to Sep 2021 (as of 20231108)\"\n },\n \"gpt-3.5-turbo-16k\":\n {\n \"label\": \"Same capabilities as the standard gpt-3.5-turbo model but with 4 times the context.\",\n \"max_token\": 16000,\n \"data\": \"Up to Sep 2021 (as of 20231108)\"\n },\n \"gpt-3.5-turbo-1106\":\n {\n \"label\": \"The latest GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens.\",\n \"max_token\": 4000,\n \"data\": \"Up to Sep 2021 (as of 20231118)\"\n },\n \"gpt-4\":\n {\n \"label\": \"More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat.\",\n \"max_token\": 8192,\n \"data\": \"Up to Sep 2021 (as of 20231108)\"\n },\n \"gpt-4-32k\":\n {\n \"label\": \"Same capabilities as the base gpt-4 mode but with 4x the context length.\",\n \"max_token\": 32768,\n \"data\": \"Up to Sep 2021 (as of 20231108)\"\n },\n \"gpt-4-1106-preview\":\n {\n \"label\": \"The latest GPT-4 model (with 128k tokens) with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens. This preview model is not yet suited for production traffic.\",\n \"max_token\": 4096,\n \"data\": \"Up to Apr 2023 (as of 20231108)\"\n }\n }\n\n s_models_list = models_list.split(\",\")\n known_models = list(all.keys())\n for t_model in s_models_list:\n model = t_model.strip()\n if model in all:\n models[model] = all[model]\n else:\n st.error(f\"Unknown model: {model} | Known models: {known_models}\")\n cf.error_exit(f\"Unknown model {model}\")\n\n model_help = \"\"\n for key in models:\n model_help += key + \":\\n\"\n model_help += models[key][\"label\"] + \"\\n\"\n model_help += \"max_token: \" + str(models[key][\"max_token\"]) + \"\\n\"\n model_help += \"data: \" + models[key][\"data\"] + \"\\n\\n\"\n\n self.models = models\n self.model_help = model_help\n\n self.gpt_presets = {\n \"None\": {\n \"pre\": \"\",\n \"post\": \"\",\n \"kwargs\": {}\n },\n \"Keywords\": {\n \"pre\": \"Extract keywords from this text: \",\n \"post\": \"\",\n \"kwargs\": {\"top_p\": 1.0, \"frequency_penalty\": 0.8, \"presence_penalty\": 0.0}\n },\n \"Summarization\": {\n \"pre\": \"\",\n \"post\": \"Tl;dr\",\n \"kwargs\": {\"top_p\": 1.0, \"frequency_penalty\": 0.0, \"presence_penalty\": 1}\n }\n }\n\n self.gpt_presets_help = \"None: regular, no additonal parameters\\n\\nKeywords: Extract keywords from a block of text. At a lower temperature it picks keywords from the text. 
At a higher temperature it will generate related keywords which can be helpful for creating search indexes.\\n\\nSummarization: Summarize text.\"\n\n self.gpt_roles = {\n 'user': 'help instruct the assistant',\n 'system': 'helps set the behavior of the assistant (ex: \"You are a helpful assistant. You also like to speak in the words of Shakespeare. Incorporate that into your responses.\")',\n 'assistant': 'helps set the past conversations. This is relevant when you had a chat that went over the maximum number of tokens and need to start a new one: give the chat history some fresh context'\n } \n\n self.gpt_roles_help = \"\"\n for key in self.gpt_roles:\n self.gpt_roles_help += key + \":\\n\" + self.gpt_roles[key] + \"\\n\\n\"\n\n\n#####\n def get_rf_role_prompt_response(self, run_file):\n run_json = cf.get_run_file(run_file)\n role = \"\"\n if 'role' in run_json:\n role = run_json['role']\n prompt = \"\"\n if 'prompt' in run_json:\n prompt = run_json['prompt']\n response = \"\"\n if 'response' in run_json:\n response = run_json['response']\n return (role, prompt, response)\n\n\n#####\n def get_dest_dir(self):\n return os.path.join(self.save_location, \"gpt\", cf.get_timeUTC())\n\n\n#####\n def format_rpr(self, role, prompt, response):\n return (f\"\\n\\n--------------------------\\n\\n -- role: {role}\\n\\n -- prompt: {prompt}\\n\\n -- response: {response }\\n\\n\")\n\n#####\n def get_chat_history(self, run_file):\n run_json = cf.get_run_file(run_file)\n if 'last_run_file' in run_json:\n (role, prompt, response) = self.get_rf_role_prompt_response(run_file)\n txt = self.format_rpr(role, prompt, response)\n last_run_file = run_json['last_run_file']\n if cf.isNotBlank(last_run_file):\n tmp = self.get_chat_history(last_run_file)\n return (self.get_chat_history(last_run_file) + txt)\n else:\n return (txt)\n else: # last one, return the formatted text\n (role, prompt, response) = self.get_rf_role_prompt_response(run_file)\n return(self.format_rpr(role, prompt, response))\n\n\n#####\n def chatgpt_it(self, model_engine, prompt, max_tokens, temperature, dest_dir, clear_chat, role, **kwargs):\n err = cf.check_existing_dir_w(dest_dir)\n if cf.isNotBlank(err):\n st.error(f\"While checking {dest_dir}: {err}\")\n cf.error_exit(err)\n\n messages = []\n last_run_file = None\n if not clear_chat:\n # Obtain previous messages\n if self.last_gpt_query in st.session_state:\n run_file = st.session_state[self.last_gpt_query]\n old_run_json = cf.get_run_file(run_file)\n if 'messages' in old_run_json:\n messages = old_run_json['messages']\n last_run_file = run_file\n\n messages.append({ 'role': role, 'content': prompt })\n\n err, response = gpt_call(self.apikey, messages, model_engine, max_tokens, temperature, **kwargs)\n if cf.isNotBlank(err):\n return err, \"\"\n\n runid = cf.get_runid()\n run_file = f\"{dest_dir}/run---{runid}.json\"\n run_json = {\n \"role\": role,\n \"prompt\": prompt,\n \"response\": response,\n 'messages': messages,\n 'last_run_file': last_run_file,\n }\n with open(run_file, 'w') as f:\n json.dump(run_json, f, indent=4)\n\n return \"\", run_file\n\n\n#####\n def estimate_tokens(self, txt):\n # https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them\n word_count = len(txt.split())\n char_count = len(txt)\n return max(int(word_count / 0.75), int(char_count / 4.00))\n\n\n#####\n def set_ui(self):\n st.sidebar.empty()\n with st.sidebar:\n st.text(\"Please check the ? 
for help\")\n model = st.selectbox(\"model\", options=list(self.models.keys()), index=0, key=\"model\", help=self.model_help)\n m_token = self.models[model]['max_token']\n role = st.selectbox(\"Role\", options=self.gpt_roles, index=0, key=\"input_role\", help = \"Role of the input text\\n\\n\" + self.gpt_roles_help)\n clear_chat = st_toggle_switch(label=\"Clear chat history for next query\", default_value=False, label_after=False, key=\"clear_chat\")\n max_tokens = st.slider('max_tokens', 0, m_token, 1000, 100, \"%i\", \"max_tokens\", \"The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model\\'s context length.\")\n temperature = st.slider('temperature', 0.0, 1.0, 0.5, 0.01, \"%0.2f\", \"temperature\", \"The temperature of the model. Higher temperature results in more surprising text.\")\n presets = st.selectbox(\"Preset\", options=list(self.gpt_presets.keys()), index=0, key=\"presets\", help=self.gpt_presets_help)\n show_tooltip = st_toggle_switch(label=\"Show Tips\", key=\"show_tips\", default_value=True, label_after=False)\n\n if show_tooltip:\n stoggle('Tips', 'GPT provides a simple but powerful interface to any models. You input some text as a prompt, and the model will generate a text completion that attempts to match whatever context or pattern you gave it:<br>- The tool works on text to: answer questions, provide definitions, translate, summarize, and analyze sentiments.<br>- Keep your prompts clear and specific. The tool works best when it has a clear understanding of what you\\'re asking it, so try to avoid vague or open-ended prompts.<br>- Use complete sentences and provide context or background information as needed.<br>- Some presets are available in the sidebar, check their details for more information.<br>A few example prompts (to use with \"None\" preset):<br>- Create a list of 8 questions for a data science interview<br>- Generate an outline for a blog post on MFT<br>- Translate \"bonjour comment allez vous\" in 1. English 2. German 3. Japanese<br>- write python code to display with an image selector from a local directory using OpenCV<br>- Write a creative ad and find a name for a container to run machine learning and computer vision algorithms by providing access to many common ML frameworks<br>- some models support \"Chat\" conversations. If you see the \"Clear Chat\" button, this will be one such model. They also support different max tokens, so adapt accordingly. The \"Clear Chat\" is here to allow you to start a new \"Chat\". 
Chat models can be given writing styles using the \"system\" \"role\"<br>More examples and hints can be found at https://platform.openai.com/examples')\n\n prompt_value=f\"GPT ({model}) Input\"\n prompt_value += f\" (role: {role})\"\n prompt_value += f\" [max_tokens: {max_tokens} | temperature: {temperature} | preset: {presets}]\"\n prompt = st.empty().text_area(prompt_value, \"\", placeholder=\"Enter your prompt\", key=\"input\")\n\n if st.button(\"Request Answer\", key=\"request_answer\"):\n if cf.isBlank(prompt) or len(prompt) < 10:\n st.error(\"Please provide a prompt of at least 10 characters before requesting an answer\", icon=\"✋\")\n return ()\n\n prompt = self.gpt_presets[presets][\"pre\"] + prompt + self.gpt_presets[presets][\"post\"]\n prompt_token_count = self.estimate_tokens(prompt)\n requested_token_count = prompt_token_count + max_tokens\n used_max_tokens = 0\n if requested_token_count > self.models[model][\"max_token\"]:\n used_max_tokens = self.models[model][\"max_token\"] - prompt_token_count\n if used_max_tokens < 0:\n st.error(\"You have exceeded the maximum number of tokens allowed by the model\", icon=\"✋\")\n else:\n st.warning(\"You requested %i tokens, but the model can only generate %i tokens. Requesting at max %i tokens.\" % (requested_token_count, self.models[model][\"max_token\"], used_max_tokens), icon=\"❌\")\n else:\n used_max_tokens = max_tokens\n\n if used_max_tokens > 0:\n gpt_dest_dir = self.get_dest_dir()\n cf.make_wdir_error(gpt_dest_dir)\n with st.spinner(f\"Asking OpenAI ({model} for {used_max_tokens} tokens with temperature {temperature}. Prompt est. tokens : {prompt_token_count})\"):\n err, run_file = self.chatgpt_it(model, prompt, used_max_tokens, temperature, gpt_dest_dir, clear_chat, role, **self.gpt_presets[presets][\"kwargs\"])\n if cf.isNotBlank(err):\n st.error(err)\n if cf.isNotBlank(run_file):\n st.session_state['last_gpt_query'] = run_file\n st.toast(\"Done\")\n\n\n if self.last_gpt_query in st.session_state:\n run_file = st.session_state[self.last_gpt_query]\n run_json = cf.get_run_file(run_file)\n\n prompt = run_json[\"prompt\"]\n response = run_json[\"response\"]\n messages = []\n if 'messages' in run_json:\n messages = run_json[\"messages\"]\n\n stoggle('Original Prompt', prompt)\n chat_history = \"\"\n if len(messages) > 0:\n chat_history = self.get_chat_history(run_file)\n stoggle('Chat History', chat_history)\n\n option_list = ('Text (no wordwrap)', 'Text (wordwrap, may cause some visual inconsistencies)',\n 'Code (automatic highlighting for supported languages)')\n option = st.selectbox('Display mode:', option_list)\n\n if option == option_list[0]:\n st.text(response)\n elif option == option_list[1]:\n st.markdown(response)\n elif option == option_list[2]:\n st.code(response)\n else:\n st.error(\"Unknown display mode\")\n\n query_output = prompt + \"\\n\\n--------------------------\\n\\n\" + response\n if len(messages) > 1:\n col1, col2, col3 = st.columns(3)\n col1.download_button(label=\"Download Latest Result\", data=response)\n col2.download_button(label=\"Download Latest Query+Result\", data=query_output)\n col3.download_button(label=\"Download Chat Query+Result\", data=chat_history)\n else:\n col1, col2 = st.columns(2)\n col1.download_button(label=\"Download Result\", data=response)\n col2.download_button(label=\"Download Query+Result\", data=query_output)"
},
{
"identifier": "OAI_DallE",
"path": "OpenAI_DallE.py",
"snippet": "class OAI_DallE:\n def __init__(self, apikey, save_location, models_list):\n self.last_dalle_query = 'last_dalle_query'\n\n self.apikey = apikey\n self.save_location = save_location\n\n self.models_supported = models_list\n self.set_parameters(models_list)\n\n self.dalle_modes = {\n \"Image\": \"The image generations endpoint allows you to create an original image given a text prompt. Generated images and maximum number of requested images depends on the model selected. Smaller sizes are faster to generate.\"\n }\n self.dalle_help = \"\"\n for key in self.dalle_modes:\n self.dalle_help += key + \":\\n\"\n self.dalle_help += self.dalle_modes[key] + \"\\n\"\n\n\n#####\n def set_parameters(self, models_list):\n models = {}\n model_help = \"\"\n\n all = {\n \"dall-e-2\":\n {\n \"label\": \"The previous DALL·E model released in Nov 2022. The maximum prompt length is 1000 characters.\",\n \"image_size\": [\"256x256\", \"512x512\", \"1024x1024\"]\n },\n \"dall-e-3\":\n {\n \"label\": \"The latest DALL·E model released in Nov 2023. The maximum prompt length is 4000 characters.\",\n \"image_size\": [\"1024x1024\", \"1024x1792\", \"1792x1024\"] \n }\n }\n\n s_models_list = models_list.split(\",\")\n known_models = list(all.keys())\n for t_model in s_models_list:\n model = t_model.strip()\n if model in all:\n models[model] = all[model]\n else:\n st.error(f\"Unknown model: [{model}] | Known models: {known_models}\")\n cf.error_exit(f\"Unknown model {model}\")\n\n model_help = \"\"\n for key in models:\n model_help += key + \":\\n\"\n model_help += models[key][\"label\"] + \"\\n\"\n model_help += \"image_size: \" + str(models[key][\"image_size\"]) + \"\\n\"\n\n self.models = models\n self.model_help = model_help\n\n\n#####\n def get_dest_dir(self):\n request_time = datetime.today().isoformat()\n return os.path.join(self.save_location, \"dalle\", request_time)\n\n\n#####\n def dalle_it(self, model, prompt, img_size, img_count, dest_dir, **kwargs):\n err = cf.check_existing_dir_w(dest_dir)\n if cf.isNotBlank(err):\n st.error(f\"While checking {dest_dir}: {err}\")\n cf.error_exit(err)\n\n err, response = dalle_call(self.apikey, model, prompt, img_size, img_count, **kwargs)\n if cf.isNotBlank(err):\n return err, \"\"\n\n info_placeholder = st.empty()\n all_images = []\n for i in range(img_count):\n image_name = f\"{dest_dir}/{i + 1}.png\"\n image_url = response.data[i].url\n info_placeholder.info(f\"Downloading result {i + 1} as {image_name}\")\n img_data = requests.get(image_url).content\n with open(image_name, 'wb') as handler:\n handler.write(img_data)\n all_images.append(image_name)\n info_placeholder.empty()\n\n runid = cf.get_runid()\n run_file = f\"{dest_dir}/run---{runid}.json\"\n run_json = {\n \"prompt\": prompt,\n \"images\": all_images,\n }\n with open(run_file, 'w') as f:\n json.dump(run_json, f, indent=4)\n\n return \"\", run_file\n\n\n#####\n def display_dalle_images(self, prompt, all_images):\n img = image_select(\"Prompt: \" + prompt, all_images, use_container_width=False)\n st.image(img)\n path = pathlib.PurePath(img)\n wdir = path.parent.name\n wfile = path.name\n dfile = f\"{wdir}-{wfile}\"\n st.download_button(\"Download Selected\", data=open(img, 'rb').read(), file_name=dfile, mime=\"image/png\", key=\"dalle_download_button\")\n\n\n#####\n def set_ui(self):\n st.sidebar.empty()\n with st.sidebar:\n st.text(\"Please check the ? 
for help\")\n mode = st.selectbox(\"mode\", options=list(self.dalle_modes.keys()), index=0, key=\"dalle_mode\", help=self.dalle_help)\n model = st.selectbox(\"model\", options=list(self.models.keys()), index=0, key=\"model\", help=self.model_help)\n model_image_size = self.models[model][\"image_size\"]\n img_size = st.selectbox(\"image size\", options=model_image_size, index=0, key=\"dalle_image_size\",\n help=\"Smaller sizes are faster to generate.\")\n\n if model == \"dall-e-2\":\n img_count = st.number_input(\"number of images\", min_value=1, max_value=10, value=1, step=1, key=\"dalle_img_count\",\n help=\"Number of images to generate.\")\n else:\n img_count = 1\n\n kwargs = {}\n if model == \"dall-e-3\":\n quality = st.selectbox(\"quality\", options=[\"standard\", \"hd\"], index=0, key=\"dalle_quality\", help=\"The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image.\")\n style = st.selectbox(\"style\", options=[\"vivid\", \"natural\"], index=0, key=\"dalle_style\", help=\"The style of the generated images. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.\")\n kwargs = {\"quality\": quality, \"style\": style}\n\n show_tooltip = st_toggle_switch(label=\"Show Tips\", key=\"show_tips\", default_value=True, label_after=False)\n\n if show_tooltip:\n stoggle(\n 'Tips',\n 'DALL·E is an AI system that creates realistic images and art from a description in natural language.<br>- The more detailed the description, the more likely you are to get the result that you or your end user want'\n )\n\n prompt_value=f\"DallE {model} Input [image size: {img_size} | image count: {img_count} | Extra: {kwargs}]\"\n prompt = st.empty().text_area(prompt_value, \"\", placeholder=\"Enter your prompt\", key=\"dalle_input\")\n\n if st.button(\"Submit Request\", key=\"dalle_request_answer\"):\n if cf.isBlank(prompt) or len(prompt) < 10:\n st.error(\"Please provide a prompt of at least 10 characters before requesting an answer\", icon=\"✋\")\n return ()\n\n dalle_dest_dir = self.get_dest_dir()\n \n cf.make_wdir_error(dalle_dest_dir)\n with st.spinner(f\"Asking OpenAI for a response...\"):\n err, run_file = self.dalle_it(model, prompt, img_size, img_count, dalle_dest_dir, **kwargs)\n if cf.isNotBlank(err):\n st.error(err)\n if cf.isNotBlank(run_file):\n st.session_state['last_dalle_query'] = run_file\n st.toast(\"Done\")\n\n if self.last_dalle_query in st.session_state:\n run_file = st.session_state[self.last_dalle_query]\n run_json = cf.get_run_file(run_file)\n self.display_dalle_images(run_json['prompt'], run_json['images'])"
}
] | import streamlit as st
import extra_streamlit_components as stx
import re
import os.path
import common_functions as cf
from OpenAI_GPT import OAI_GPT
from OpenAI_DallE import OAI_DallE
from dotenv import load_dotenv
from datetime import datetime
| 6,696 |
#!/usr/bin/env python3
# Based on
# https://platform.openai.com/docs/quickstart/build-your-application
# https://github.com/openai/openai-python
#####
iti_version="0.9.1"
st.set_page_config(page_title=f"OpenAI API WebUI ({iti_version})", page_icon="🫥", layout="wide", initial_sidebar_state="expanded", menu_items={'Get Help': 'https://github.com/Infotrend-Inc/OpenAI_WebUI', 'About': f"# OpenAI WebUI ({iti_version})\n Brought to you by [Infotrend Inc.](https://www.infotrend.com/)"})
#####
def main():
err = cf.check_file_r(".env", "Environment file")
if cf.isBlank(err):
load_dotenv()
    # If the file is not present, hopefully the variable was set in the Docker environment
apikey = ''
if 'OPENAI_API_KEY' in os.environ:
apikey = os.environ.get('OPENAI_API_KEY')
if cf.isBlank(apikey):
st.error(f"Could not find the OPENAI_API_KEY environment variable")
cf.error_exit(f"Could not find the OPENAI_API_KEY environment variable")
save_location = ""
if 'OAIWUI_SAVEDIR' in os.environ:
save_location = os.environ.get('OAIWUI_SAVEDIR')
if cf.isBlank(save_location):
st.error(f"Could not find the OAIWUI_SAVEDIR environment variable")
cf.error_exit("Could not find the OAIWUI_SAVEDIR environment variable")
err = cf.check_existing_dir_w(save_location, "OAIWUI_SAVEDIR directory")
if cf.isNotBlank(err):
        st.error(f"While checking OAIWUI_SAVEDIR: {err}")
cf.error_exit(f"{err}")
gpt_models = ""
if 'OAIWUI_GPT_MODELS' in os.environ:
gpt_models = os.environ.get('OAIWUI_GPT_MODELS')
else:
st.error(f"Could not find the OAIWUI_GPT_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_GPT_MODELS environment variable")
if cf.isBlank(gpt_models):
st.error(f"OAIWUI_GPT_MODELS environment variable is empty")
cf.error_exit("OAIWUI_GPT_MODELS environment variable is empty")
dalle_models = ""
if 'OAIWUI_DALLE_MODELS' in os.environ:
dalle_models = os.environ.get('OAIWUI_DALLE_MODELS')
else:
st.error(f"Could not find the OAIWUI_DALLE_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_DALLE_MODELS environment variable")
if cf.isBlank(dalle_models):
st.error(f"OAIWUI_DALLE_MODELS environment variable is empty")
cf.error_exit("OAIWUI_DALLE_MODELS environment variable is empty")
username = ""
if 'OAIWUI_USERNAME' in os.environ:
username = os.environ.get('OAIWUI_USERNAME')
if cf.isBlank(username):
st.warning(f"OAIWUI_USERNAME provided but empty, will ask for username")
else:
st.session_state['username'] = username
# Store the initial value of widgets in session state
if "visibility" not in st.session_state:
st.session_state.visibility = "visible"
st.session_state.disabled = False
if 'webui_runid' not in st.session_state:
st.session_state['webui_runid'] = datetime.now().strftime("%Y%m%d-%H%M%S")
st.empty()
# Grab a session-specific value for username
username = ""
if 'username' in st.session_state:
username = st.session_state['username']
if cf.isBlank(username):
st.image("./assets/Infotrend_Logo.png", width=600)
username = st.text_input("Enter a username (unauthorized characters will be replaced by _)")
if st.button("Save username"):
# replace non alphanumeric by _
username = re.sub('[^0-9a-zA-Z]+', '_', username)
if cf.isBlank(username):
st.error(f"Username cannot be empty")
else:
st.session_state['username'] = username
st.rerun()
else:
cf.make_wdir_error(os.path.join(save_location))
cf.make_wdir_error(os.path.join(save_location, iti_version))
long_save_location = os.path.join(save_location, iti_version, username)
cf.make_wdir_error(os.path.join(long_save_location))
cf.make_wdir_error(os.path.join(long_save_location, "dalle"))
cf.make_wdir_error(os.path.join(long_save_location, "gpt"))
set_ui(long_save_location, apikey, gpt_models, dalle_models)
#####
def set_ui(long_save_location, apikey, gpt_models, dalle_models):
oai_gpt = OAI_GPT(apikey, long_save_location, gpt_models)
oai_dalle = None
if 'OAIWUI_GPT_ONLY' in os.environ:
tmp = os.environ.get('OAIWUI_GPT_ONLY')
if tmp == "True":
oai_dalle = None
elif tmp == "False":
|
#!/usr/bin/env python3
# Based on
# https://platform.openai.com/docs/quickstart/build-your-application
# https://github.com/openai/openai-python
#####
iti_version="0.9.1"
st.set_page_config(page_title=f"OpenAI API WebUI ({iti_version})", page_icon="🫥", layout="wide", initial_sidebar_state="expanded", menu_items={'Get Help': 'https://github.com/Infotrend-Inc/OpenAI_WebUI', 'About': f"# OpenAI WebUI ({iti_version})\n Brought to you by [Infotrend Inc.](https://www.infotrend.com/)"})
#####
def main():
err = cf.check_file_r(".env", "Environment file")
if cf.isBlank(err):
load_dotenv()
    # If the file is not present, hopefully the variable was set in the Docker environment
apikey = ''
if 'OPENAI_API_KEY' in os.environ:
apikey = os.environ.get('OPENAI_API_KEY')
if cf.isBlank(apikey):
st.error(f"Could not find the OPENAI_API_KEY environment variable")
cf.error_exit(f"Could not find the OPENAI_API_KEY environment variable")
save_location = ""
if 'OAIWUI_SAVEDIR' in os.environ:
save_location = os.environ.get('OAIWUI_SAVEDIR')
if cf.isBlank(save_location):
st.error(f"Could not find the OAIWUI_SAVEDIR environment variable")
cf.error_exit("Could not find the OAIWUI_SAVEDIR environment variable")
err = cf.check_existing_dir_w(save_location, "OAIWUI_SAVEDIR directory")
if cf.isNotBlank(err):
        st.error(f"While checking OAIWUI_SAVEDIR: {err}")
cf.error_exit(f"{err}")
gpt_models = ""
if 'OAIWUI_GPT_MODELS' in os.environ:
gpt_models = os.environ.get('OAIWUI_GPT_MODELS')
else:
st.error(f"Could not find the OAIWUI_GPT_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_GPT_MODELS environment variable")
if cf.isBlank(gpt_models):
st.error(f"OAIWUI_GPT_MODELS environment variable is empty")
cf.error_exit("OAIWUI_GPT_MODELS environment variable is empty")
dalle_models = ""
if 'OAIWUI_DALLE_MODELS' in os.environ:
dalle_models = os.environ.get('OAIWUI_DALLE_MODELS')
else:
st.error(f"Could not find the OAIWUI_DALLE_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_DALLE_MODELS environment variable")
if cf.isBlank(dalle_models):
st.error(f"OAIWUI_DALLE_MODELS environment variable is empty")
cf.error_exit("OAIWUI_DALLE_MODELS environment variable is empty")
username = ""
if 'OAIWUI_USERNAME' in os.environ:
username = os.environ.get('OAIWUI_USERNAME')
if cf.isBlank(username):
st.warning(f"OAIWUI_USERNAME provided but empty, will ask for username")
else:
st.session_state['username'] = username
# Store the initial value of widgets in session state
if "visibility" not in st.session_state:
st.session_state.visibility = "visible"
st.session_state.disabled = False
if 'webui_runid' not in st.session_state:
st.session_state['webui_runid'] = datetime.now().strftime("%Y%m%d-%H%M%S")
st.empty()
# Grab a session-specific value for username
username = ""
if 'username' in st.session_state:
username = st.session_state['username']
if cf.isBlank(username):
st.image("./assets/Infotrend_Logo.png", width=600)
username = st.text_input("Enter a username (unauthorized characters will be replaced by _)")
if st.button("Save username"):
# replace non alphanumeric by _
username = re.sub('[^0-9a-zA-Z]+', '_', username)
if cf.isBlank(username):
st.error(f"Username cannot be empty")
else:
st.session_state['username'] = username
st.rerun()
else:
cf.make_wdir_error(os.path.join(save_location))
cf.make_wdir_error(os.path.join(save_location, iti_version))
long_save_location = os.path.join(save_location, iti_version, username)
cf.make_wdir_error(os.path.join(long_save_location))
cf.make_wdir_error(os.path.join(long_save_location, "dalle"))
cf.make_wdir_error(os.path.join(long_save_location, "gpt"))
set_ui(long_save_location, apikey, gpt_models, dalle_models)
#####
def set_ui(long_save_location, apikey, gpt_models, dalle_models):
oai_gpt = OAI_GPT(apikey, long_save_location, gpt_models)
oai_dalle = None
if 'OAIWUI_GPT_ONLY' in os.environ:
tmp = os.environ.get('OAIWUI_GPT_ONLY')
if tmp == "True":
oai_dalle = None
        elif tmp == "False":
| oai_dalle = OAI_DallE(apikey, long_save_location, dalle_models) | 1 | 2023-11-09 16:01:20+00:00 | 8k |
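main() above is configured entirely through environment variables, optionally loaded from a .env file. A minimal sketch of the same pattern outside Streamlit; the fallback values are placeholders, not defaults of the app:

import os
from dotenv import load_dotenv

load_dotenv()  # pick up a local .env file if one exists

apikey = os.environ.get("OPENAI_API_KEY", "")
gpt_models = os.environ.get("OAIWUI_GPT_MODELS", "gpt-3.5-turbo")
gpt_only = os.environ.get("OAIWUI_GPT_ONLY", "False") == "True"

if not apikey:
    raise SystemExit("OPENAI_API_KEY is not set")

print(f"GPT models: {gpt_models.split(',')} | GPT only: {gpt_only}")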
serl-robot/serl | serl/agents/ddpg/pixel_ddpg_learner.py | [
{
"identifier": "batched_random_crop",
"path": "serl/utils/augmentations.py",
"snippet": "def batched_random_crop(key, obs, pixel_key, padding=4):\n imgs = obs[pixel_key]\n keys = jax.random.split(key, imgs.shape[0])\n imgs = jax.vmap(random_crop, (0, 0, None))(keys, imgs, padding)\n return obs.copy(add_or_replace={pixel_key: imgs})"
},
{
"identifier": "DDPGLearner",
"path": "serl/agents/ddpg/ddpg_learner.py",
"snippet": "class DDPGLearner(Agent):\n critic: TrainState\n target_critic: TrainState\n tau: float\n discount: float\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n critic_dropout_rate: Optional[float] = None,\n critic_layer_norm: bool = False,\n ):\n \"\"\"\n An implementation of DDPG\n \"\"\"\n\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key = jax.random.split(rng, 3)\n\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_def = TanhNormal(actor_base_cls, action_dim)\n actor_params = actor_def.init(actor_key, observations)[\"params\"]\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n critic_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n dropout_rate=critic_dropout_rate,\n use_layer_norm=critic_layer_norm,\n )\n critic_cls = partial(StateActionValue, base_cls=critic_base_cls)\n critic_def = Ensemble(critic_cls, num=1)\n critic_params = critic_def.init(critic_key, observations, actions)[\"params\"]\n critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.adam(learning_rate=critic_lr),\n )\n\n target_critic_def = Ensemble(critic_cls, num=1)\n target_critic = TrainState.create(\n apply_fn=target_critic_def.apply,\n params=critic_params,\n tx=optax.GradientTransformation(lambda _: None, lambda _: None),\n )\n\n return cls(\n rng=rng,\n actor=actor,\n critic=critic,\n target_critic=target_critic,\n tau=tau,\n discount=discount,\n )\n\n @jax.jit\n def compute_actions(self, observations:dict) -> Tuple[jnp.ndarray, jnp.ndarray]:\n '''\n sample actions from the actor with a small amount of noise injected\n TODO: make the noise scale configurable\n\n :param observations: a batch of observations\n :return: actions and jax random key\n '''\n key, rng = jax.random.split(self.rng)\n dist = self.actor.apply_fn({\"params\": self.actor.params}, observations)\n actions = dist.sample(seed=key)\n key, rng = jax.random.split(self.rng)\n actions += jax.random.normal(key, shape=actions.shape) * 0.05\n return actions, rng\n\n def sample_actions(self, observations: np.ndarray) -> np.ndarray:\n '''\n sample actions from the actor with a small amount of noise injected, and convert to numpy array on CPU\n update agent's rng\n\n :param observations: a batch of observations\n :return: actions in numpy array and jax random key\n '''\n actions, rng = self.compute_actions(observations)\n return np.asarray(actions), self.replace(rng=rng)\n\n def update_actor(self, batch: DatasetDict) -> Tuple[Agent, Dict[str, float]]:\n '''\n update DDPG actor\n\n :param observations: a batch of observations\n :return: updated agent and info dict\n '''\n key, rng = jax.random.split(self.rng)\n key2, rng = jax.random.split(rng)\n\n def actor_loss_fn(actor_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n dist = self.actor.apply_fn({\"params\": actor_params}, batch[\"observations\"])\n actions = dist.sample(seed=key)\n qs = self.critic.apply_fn(\n {\"params\": self.critic.params},\n batch[\"observations\"],\n actions,\n True,\n rngs={\"dropout\": key2},\n ) # training=True\n q = qs.mean(axis=0)\n actor_loss = (-q).mean()\n 
return actor_loss, {\"actor_loss\": actor_loss}\n\n grads, actor_info = jax.grad(actor_loss_fn, has_aux=True)(self.actor.params)\n actor = self.actor.apply_gradients(grads=grads)\n\n return self.replace(actor=actor, rng=rng), actor_info\n\n def update_critic(self, batch: DatasetDict) -> Tuple[TrainState, Dict[str, float]]:\n '''\n update DDPG critic\n\n :param observations: a batch of observations\n :return: updated agent and info dict\n '''\n dist = self.actor.apply_fn(\n {\"params\": self.actor.params}, batch[\"next_observations\"]\n )\n\n rng = self.rng\n\n key, rng = jax.random.split(rng)\n next_actions = dist.sample(seed=key)\n\n # Used only if there is an ensemble of Qs, which is not the case in DDPG.\n key, rng = jax.random.split(rng)\n target_params = subsample_ensemble(\n key, self.target_critic.params, 1, 1\n )\n\n key, rng = jax.random.split(rng)\n next_qs = self.target_critic.apply_fn(\n {\"params\": target_params},\n batch[\"next_observations\"],\n next_actions,\n True,\n rngs={\"dropout\": key},\n ) # training=True\n\n target_q = batch[\"rewards\"] + self.discount * batch[\"masks\"] * next_qs\n\n key, rng = jax.random.split(rng)\n\n def critic_loss_fn(critic_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n qs = self.critic.apply_fn(\n {\"params\": critic_params},\n batch[\"observations\"],\n batch[\"actions\"],\n True,\n rngs={\"dropout\": key},\n ) # training=True\n critic_loss = ((qs - target_q) ** 2).mean()\n return critic_loss, {\"critic_loss\": critic_loss, \"q\": qs.mean()}\n\n grads, info = jax.grad(critic_loss_fn, has_aux=True)(self.critic.params)\n critic = self.critic.apply_gradients(grads=grads)\n\n target_critic_params = optax.incremental_update(\n critic.params, self.target_critic.params, self.tau\n )\n target_critic = self.target_critic.replace(params=target_critic_params)\n\n return self.replace(critic=critic, target_critic=target_critic, rng=rng), info\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n '''\n update DDPG actor and critic updates\n\n :param observations: a batch of observations\n :param utd_ratio: number of critic updates per actor update\n :return: updated agent and info dict\n '''\n\n new_agent = self\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_agent, critic_info = new_agent.update_critic(mini_batch)\n\n new_agent, actor_info = new_agent.update_actor(mini_batch)\n\n return new_agent, {**actor_info, **critic_info}"
},
{
"identifier": "DatasetDict",
"path": "serl/data/dataset.py",
"snippet": "def _check_lengths(dataset_dict: DatasetDict, dataset_len: Optional[int] = None) -> int:\ndef _subselect(dataset_dict: DatasetDict, index: np.ndarray) -> DatasetDict:\ndef _sample(\n dataset_dict: Union[np.ndarray, DatasetDict], indx: np.ndarray\n) -> DatasetDict:\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n def np_random(self) -> np.random.RandomState:\n def seed(self, seed: Optional[int] = None) -> list:\n def __len__(self) -> int:\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n ) -> frozen_dict.FrozenDict:\n def sample_jax(self, batch_size: int, keys: Optional[Iterable[str]] = None):\n def _sample_jax(rng, src, max_indx: int):\n def split(self, ratio: float) -> Tuple[\"Dataset\", \"Dataset\"]:\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n def filter(\n self, take_top: Optional[float] = None, threshold: Optional[float] = None\n ):\n def normalize_returns(self, scaling: float = 1000):\nclass Dataset(object):"
},
{
"identifier": "TanhNormal",
"path": "serl/distributions/tanh_normal.py",
"snippet": "class Normal(nn.Module):\n def __call__(self, inputs, *args, **kwargs) -> tfd.Distribution:"
},
{
"identifier": "Ensemble",
"path": "serl/networks/ensemble.py",
"snippet": "class Ensemble(nn.Module):\n net_cls: Type[nn.Module]\n num: int = 2\n\n @nn.compact\n def __call__(self, *args):\n ensemble = nn.vmap(\n self.net_cls,\n variable_axes={\"params\": 0},\n split_rngs={\"params\": True, \"dropout\": True},\n in_axes=None,\n out_axes=0,\n axis_size=self.num,\n )\n return ensemble()(*args)"
},
{
"identifier": "MLP",
"path": "serl/networks/mlp.py",
"snippet": "class MLP(nn.Module):\n hidden_dims: Sequence[int]\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n activate_final: bool = False\n use_layer_norm: bool = False\n scale_final: Optional[float] = None\n dropout_rate: Optional[float] = None\n spectral_norm: bool = False\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training: bool = False) -> jnp.ndarray:\n\n for i, size in enumerate(self.hidden_dims):\n if i + 1 == len(self.hidden_dims) and self.scale_final is not None:\n x = nn.Dense(size, kernel_init=default_init(self.scale_final))(x)\n else:\n x = nn.Dense(size, kernel_init=default_init())(x)\n\n if i + 1 < len(self.hidden_dims) or self.activate_final:\n if self.dropout_rate is not None and self.dropout_rate > 0:\n x = nn.Dropout(rate=self.dropout_rate)(\n x, deterministic=not training\n )\n if self.use_layer_norm:\n x = nn.LayerNorm()(x)\n x = self.activations(x)\n return x"
},
{
"identifier": "PixelMultiplexer",
"path": "serl/networks/pixel_multiplexer.py",
"snippet": "class PixelMultiplexer(nn.Module):\n encoder_cls: Type[nn.Module]\n network_cls: Type[nn.Module]\n latent_dim: int\n stop_gradient: bool = False\n pixel_keys: Tuple[str, ...] = (\"pixels\",)\n depth_keys: Tuple[str, ...] = ()\n\n @nn.compact\n def __call__(\n self,\n observations: Union[FrozenDict, Dict],\n actions: Optional[jnp.ndarray] = None,\n training: bool = False,\n ) -> jnp.ndarray:\n observations = FrozenDict(observations)\n image_obs, state_obs = observations.pop(\"state\")\n reshape_img = lambda x: x.reshape(*x.shape[:-2], -1) / 255.0\n image_obs = jax.tree_map(reshape_img, image_obs)\n\n x = self.encoder_cls(name=f\"image_encoder\")(image_obs, training)\n if self.stop_gradient:\n # We do not update conv layers with policy gradients.\n x = jax.lax.stop_gradient(x)\n x = nn.Dense(512, kernel_init=default_init())(x)\n x = nn.LayerNorm()(x)\n x = nn.tanh(x)\n\n if \"state\" in observations:\n y = nn.Dense(self.latent_dim, kernel_init=default_init())(\n observations[\"state\"]\n )\n y = nn.LayerNorm()(y)\n y = nn.tanh(y)\n\n x = jnp.concatenate([x, y], axis=-1)\n\n if actions is None:\n return self.network_cls()(x, training)\n else:\n return self.network_cls()(x, actions, training)"
},
{
"identifier": "StateActionValue",
"path": "serl/networks/state_action_value.py",
"snippet": "class StateActionValue(nn.Module):\n base_cls: nn.Module\n\n @nn.compact\n def __call__(\n self, observations: jnp.ndarray, actions: jnp.ndarray, *args, **kwargs\n ) -> jnp.ndarray:\n inputs = jnp.concatenate([observations, actions], axis=-1)\n outputs = self.base_cls()(inputs, *args, **kwargs)\n\n value = nn.Dense(1, kernel_init=default_init())(outputs)\n\n return jnp.squeeze(value, -1)"
},
{
"identifier": "TwoD4PGEncoder",
"path": "serl/networks/encoders/two_d4pg_encoder.py",
"snippet": "class TwoD4PGEncoder(nn.Module):\n features: Sequence[int] = (32, 32, 32, 32)\n filters: Sequence[int] = (2, 1, 1, 1)\n strides: Sequence[int] = (2, 1, 1, 1)\n padding: str = \"VALID\"\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n\n @nn.compact\n def __call__(self, x: jnp.ndarray, training=False) -> jnp.ndarray:\n assert len(self.features) == len(self.strides)\n\n processed_tensors = []\n reshape = False\n\n # Loop through all the tensors in the input FrozenDict\n for key, tensor in x.items():\n # Expand dimensions if they are 3\n if tensor.ndim == 3:\n tensor = tensor[None, ...]\n reshape = True\n\n # Apply Conv layers\n for features, filter_, stride in zip(self.features, self.filters, self.strides):\n tensor = nn.Conv(\n features,\n kernel_size=(filter_, filter_),\n strides=(stride, stride),\n kernel_init=default_init(),\n padding=self.padding,\n )(tensor)\n tensor = self.activations(tensor)\n\n tensor = SpatialLearnedEmbeddings(*(tensor.shape[1:]), 8)(tensor)\n processed_tensors.append(tensor)\n\n # Concatenate all processed tensors along the last axis\n concatenated_tensor = jnp.concatenate(processed_tensors, axis=-1)\n\n # Reshape if original tensors were 3D\n if reshape:\n concatenated_tensor = concatenated_tensor.reshape(-1)\n\n return concatenated_tensor"
},
{
"identifier": "TwoMobileNetEncoder",
"path": "serl/networks/encoders/two_mobilenet_encoder.py",
"snippet": "class TwoMobileNetEncoder(nn.Module):\n mobilenet: nn.Module\n params: FrozenDict\n dropout_rate: float = 0.1\n\n @nn.compact\n def __call__(self, x: FrozenDict[str, jnp.ndarray], training=False) -> jnp.ndarray:\n processed_tensors = []\n reshape = False\n mean = jnp.array((0.485, 0.456, 0.406))[None, ...]\n std = jnp.array((0.229, 0.224, 0.225))[None, ...]\n\n # Loop through all the tensors in the input FrozenDict\n for key, tensor in x.items():\n # Expand dimensions if they are 3\n if tensor.ndim == 3:\n tensor = tensor[None, ...]\n reshape = True\n\n # Apply mobilenet\n tensor = (tensor - mean) / std # normalize using ImageNet stats\n tensor = self.mobilenet.apply(self.params, tensor, training=False)\n # Apply SpatialLearnedEmbeddings and Dropout\n tensor = SpatialLearnedEmbeddings(*(tensor.shape[1:]), 8)(tensor)\n tensor = nn.Dropout(self.dropout_rate)(tensor, deterministic=not training)\n\n processed_tensors.append(tensor)\n\n # Concatenate all processed tensors along the last axis\n concatenated_tensor = jnp.concatenate(processed_tensors, axis=-1)\n\n # Reshape if original tensors were 3D\n if reshape:\n concatenated_tensor = concatenated_tensor.reshape(-1)\n\n return concatenated_tensor"
},
{
"identifier": "_unpack",
"path": "serl/utils/commons.py",
"snippet": "def _unpack(batch: DatasetDict):\n '''\n Helps to minimize CPU to GPU transfer.\n Assuming that if next_observation is missing, it's combined with observation:\n\n :param batch: a batch of data from the replay buffer, a dataset dict\n :return: a batch of unpacked data, a dataset dict\n '''\n\n for pixel_key in batch[\"observations\"].keys():\n if pixel_key not in batch[\"next_observations\"]:\n obs_pixels = batch[\"observations\"][pixel_key][..., :-1]\n next_obs_pixels = batch[\"observations\"][pixel_key][..., 1:]\n\n obs = batch[\"observations\"].copy(add_or_replace={pixel_key: obs_pixels})\n next_obs = batch[\"next_observations\"].copy(\n add_or_replace={pixel_key: next_obs_pixels}\n )\n batch = batch.copy(\n add_or_replace={\"observations\": obs, \"next_observations\": next_obs}\n )\n\n return batch"
},
{
"identifier": "_share_encoder",
"path": "serl/utils/commons.py",
"snippet": "def _share_encoder(source, target):\n '''\n Share encoder params between source and target:\n \n :param source: the source network, TrainState\n :param target: the target network, TrainState\n '''\n\n replacers = {}\n for k, v in source.params.items():\n if \"encoder\" in k:\n replacers[k] = v\n\n # e.g., Use critic conv layers in actor:\n new_params = target.params.copy(add_or_replace=replacers)\n return target.replace(params=new_params)"
}
] | from functools import partial
from itertools import zip_longest
from typing import Callable, Dict, Optional, Sequence, Tuple, OrderedDict
from collections import OrderedDict
from jax import numpy as jnp
from flax import struct
from flax.core import FrozenDict, freeze
from flax.training.train_state import TrainState
from serl.utils.augmentations import batched_random_crop
from serl.agents.ddpg.ddpg_learner import DDPGLearner
from serl.data.dataset import DatasetDict
from serl.distributions import TanhNormal
from serl.networks import MLP, Ensemble, PixelMultiplexer, StateActionValue
from serl.networks.encoders import TwoMobileNetEncoder, TwoD4PGEncoder
from serl.utils.commons import _unpack, _share_encoder
from jax_resnet import pretrained_resnet, slice_variables
from jeffnet.linen import create_model, EfficientNet
import gym
import jax
import optax
import flax.linen as nn | 5,543 | """Implementations of algorithms for continuous control."""
# from flax.training import checkpoints
class PixelDDPGLearner(DDPGLearner):
data_augmentation_fn: Callable = struct.field(pytree_node=False)
@classmethod
def create(
cls,
seed: int,
observation_space: gym.Space,
action_space: gym.Space,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
cnn_features: Sequence[int] = (32, 32, 32, 32),
cnn_filters: Sequence[int] = (3, 3, 3, 3),
cnn_strides: Sequence[int] = (2, 1, 1, 1),
cnn_padding: str = "VALID",
latent_dim: int = 50,
encoder: str = "d4pg",
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
critic_dropout_rate: Optional[float] = None,
critic_layer_norm: bool = False,
pixel_keys: Tuple[str, ...] = ("pixels",),
depth_keys: Tuple[str, ...] = (),
):
"""
An implementation of pixel-based DDPG
"""
action_dim = action_space.shape[-1]
observations = observation_space.sample()
actions = action_space.sample()
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key = jax.random.split(rng, 3)
if encoder == "d4pg":
encoder_cls = partial(
TwoD4PGEncoder,
features=cnn_features,
filters=cnn_filters,
strides=cnn_strides,
padding=cnn_padding,
)
elif encoder == "resnet":
# TODO: option 1 refactor this to use ResNet from huggingface, option 2 use jax_resnet
raise NotImplementedError
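            # NOTE: the ResNet setup below is unreachable until the TODO above is resolved
            # (TwoResNetEncoder is also not imported in this module).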
ResNet, resnet_variables = pretrained_resnet(18)
ResNet = ResNet()
ResNet = nn.Sequential(ResNet.layers[0:3])
resnet_variables = slice_variables(resnet_variables, end=3)
encoder_cls = partial(TwoResNetEncoder, resnet=ResNet, params=resnet_variables)
elif encoder == "mobilenet":
            # TODO: unfortunately, huggingface does not support many visual encoders in JAX, so we have to rely on https://github.com/Leo428/efficientnet-jax, forked from @rwightman
MobileNet, mobilenet_variables = create_model('tf_mobilenetv3_large_100', pretrained=True)
encoder_cls = partial(TwoMobileNetEncoder, mobilenet=MobileNet, params=mobilenet_variables)
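        # Actor: an MLP trunk feeding a TanhNormal policy head, multiplexed with the pixel encoder.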
actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)
actor_def = PixelMultiplexer(
encoder_cls=encoder_cls,
network_cls=actor_cls,
latent_dim=latent_dim,
stop_gradient=True, # do not update the encoder params
pixel_keys=pixel_keys,
depth_keys=depth_keys,
)
actor_params = actor_def.init(actor_key, observations)["params"]
actor = TrainState.create(
apply_fn=actor_def.apply,
params=actor_params,
tx=optax.adam(learning_rate=actor_lr),
)
critic_base_cls = partial(
MLP,
hidden_dims=hidden_dims,
activate_final=True,
dropout_rate=critic_dropout_rate,
use_layer_norm=critic_layer_norm,
)
| """Implementations of algorithms for continuous control."""
# from flax.training import checkpoints
class PixelDDPGLearner(DDPGLearner):
data_augmentation_fn: Callable = struct.field(pytree_node=False)
@classmethod
def create(
cls,
seed: int,
observation_space: gym.Space,
action_space: gym.Space,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
cnn_features: Sequence[int] = (32, 32, 32, 32),
cnn_filters: Sequence[int] = (3, 3, 3, 3),
cnn_strides: Sequence[int] = (2, 1, 1, 1),
cnn_padding: str = "VALID",
latent_dim: int = 50,
encoder: str = "d4pg",
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
critic_dropout_rate: Optional[float] = None,
critic_layer_norm: bool = False,
pixel_keys: Tuple[str, ...] = ("pixels",),
depth_keys: Tuple[str, ...] = (),
):
"""
An implementation of pixel-based DDPG
"""
action_dim = action_space.shape[-1]
observations = observation_space.sample()
actions = action_space.sample()
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key = jax.random.split(rng, 3)
if encoder == "d4pg":
encoder_cls = partial(
TwoD4PGEncoder,
features=cnn_features,
filters=cnn_filters,
strides=cnn_strides,
padding=cnn_padding,
)
elif encoder == "resnet":
# TODO: option 1 refactor this to use ResNet from huggingface, option 2 use jax_resnet
raise NotImplementedError
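            # NOTE: the ResNet setup below is unreachable until the TODO above is resolved
            # (TwoResNetEncoder is also not imported in this module).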
ResNet, resnet_variables = pretrained_resnet(18)
ResNet = ResNet()
ResNet = nn.Sequential(ResNet.layers[0:3])
resnet_variables = slice_variables(resnet_variables, end=3)
encoder_cls = partial(TwoResNetEncoder, resnet=ResNet, params=resnet_variables)
elif encoder == "mobilenet":
            # TODO: unfortunately, huggingface does not support many visual encoders in JAX, so we have to rely on https://github.com/Leo428/efficientnet-jax, forked from @rwightman
MobileNet, mobilenet_variables = create_model('tf_mobilenetv3_large_100', pretrained=True)
encoder_cls = partial(TwoMobileNetEncoder, mobilenet=MobileNet, params=mobilenet_variables)
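        # Actor: an MLP trunk feeding a TanhNormal policy head, multiplexed with the pixel encoder.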
actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)
actor_def = PixelMultiplexer(
encoder_cls=encoder_cls,
network_cls=actor_cls,
latent_dim=latent_dim,
stop_gradient=True, # do not update the encoder params
pixel_keys=pixel_keys,
depth_keys=depth_keys,
)
actor_params = actor_def.init(actor_key, observations)["params"]
actor = TrainState.create(
apply_fn=actor_def.apply,
params=actor_params,
tx=optax.adam(learning_rate=actor_lr),
)
critic_base_cls = partial(
MLP,
hidden_dims=hidden_dims,
activate_final=True,
dropout_rate=critic_dropout_rate,
use_layer_norm=critic_layer_norm,
) | critic_cls = partial(StateActionValue, base_cls=critic_base_cls) | 7 | 2023-11-02 23:32:24+00:00 | 8k |
daily-demos/ai-meeting-assistant | server/call/session.py | [
{
"identifier": "BotConfig",
"path": "server/config.py",
"snippet": "class BotConfig:\n _openai_api_key: str = None\n _openai_model_name: str = None\n _log_dir_path: str = None\n _daily_room_url: str = None\n _daily_meeting_token: str = None\n\n def __init__(self,\n openai_api_key: str,\n openai_model_name: str,\n daily_room_url: str = None,\n daily_meeting_token: str = None,\n log_dir_path: str = None):\n self._openai_api_key = openai_api_key\n self._openai_model_name = openai_model_name\n self._log_dir_path = log_dir_path\n self._daily_room_url = daily_room_url\n self._daily_meeting_token = daily_meeting_token\n\n @property\n def openai_model_name(self) -> str:\n return self._openai_model_name\n\n @property\n def openai_api_key(self) -> str:\n return self._openai_api_key\n\n @property\n def log_dir_path(self) -> str:\n return self._log_dir_path\n\n @property\n def daily_room_url(self) -> str:\n return self._daily_room_url\n\n @property\n def daily_meeting_token(self) -> str:\n return self._daily_meeting_token\n\n def get_log_file_path(self, room_name: str) -> str | None:\n \"\"\"Returns the log file for the given room name\"\"\"\n if not self.log_dir_path:\n return None\n return os.path.join(self.log_dir_path, f\"{room_name}.log\")\n\n def ensure_dirs(self):\n \"\"\"Creates required file directories if they do not already exist.\"\"\"\n if self.log_dir_path:\n ensure_dir(self.log_dir_path)"
},
{
"identifier": "get_headless_config",
"path": "server/config.py",
"snippet": "def get_headless_config() -> BotConfig:\n dotenv_path = join(dirname(dirname(abspath(__file__))), '.env')\n load_dotenv(dotenv_path)\n\n parser = argparse.ArgumentParser(description='Start a session.')\n parser.add_argument(\n '--room_url',\n type=str,\n default=os.environ.get('ROOM_URL'),\n help='URL of the room')\n parser.add_argument(\n '--oai_api_key',\n type=str,\n default=os.environ.get('OPENAI_API_KEY'),\n help='OpenAI API key')\n parser.add_argument(\n '--oai_model_name',\n type=str,\n default=os.environ.get('OPENAI_MODEL_NAME'),\n help='OpenAI API URL')\n parser.add_argument(\n '--daily_meeting_token',\n type=str,\n default=None,\n help='Daily meetng token')\n parser.add_argument(\n '--log_dir_name',\n type=str,\n default=None,\n help='Log dir name')\n args = parser.parse_args()\n\n ldn = args.log_dir_name\n ldp = None\n if ldn:\n ldp = os.path.abspath(ldn)\n return BotConfig(args.oai_api_key, args.oai_model_name,\n args.room_url, args.daily_meeting_token, ldp)"
},
{
"identifier": "OpenAIAssistant",
"path": "server/llm/openai_assistant.py",
"snippet": "class OpenAIAssistant(Assistant):\n \"\"\"Class that implements assistant features using the OpenAI API\"\"\"\n _client: OpenAI = None\n\n _oai_assistant_id: int = None\n _oai_summary_thread_id: int = None\n _model_name: str = None\n _logger: logging.Logger = None\n\n # For now, just store context in memory.\n _raw_context: deque([ChatCompletionMessageParam]) = None\n _clean_transcript: str = None\n _clean_transcript_running: bool = False\n _summary_context: str = None\n\n # Process 20 context items at a time.\n _transcript_batch_size: int = 25\n\n _default_transcript_prompt = ChatCompletionSystemMessageParam(content=\"\"\"\n Using the exact transcript provided in the previous messages, convert it into a cleaned-up, paragraphed format. It is crucial that you strictly adhere to the content of the provided transcript without adding or modifying any of the original dialogue. Your tasks are to:\n\n 1. Correct punctuation and spelling mistakes.\n 2. Merge broken sentences into complete ones.\n 3. Remove timestamps and transcript types.\n 4. Clearly indicate the speaker's name at the beginning of their dialogue.\n\n Do not add any new content or dialogue that was not present in the original transcript. The focus is on cleaning and reformatting the existing content for clarity and readability.\n \"\"\",\n role=\"system\")\n\n _default_prompt = \"\"\"\n Primary Instruction:\n Based on the provided meeting transcripts, please create a concise summary.\n Your summary should include:\n\n 1. Key discussion points.\n 2. Decisions made.\n 3. Action items assigned.\n\n Keep the summary within six sentences, ensuring it captures the essence of the conversation. Structure it in clear, digestible parts for easy understanding. Rely solely on information from the transcript; do not infer or add information not explicitly mentioned. Exclude any square brackets, tags, or timestamps from the summary. Instead of re-parsing the entire context, use previous summaries you've generated to inform the completion of each new summary. 
Each summary should be holistic and represent the entire call.\n \"\"\"\n\n def __init__(self, api_key: str, model_name: str = None,\n logger: logging.Logger = None):\n if not api_key:\n raise Exception(\"OpenAI API key not provided, but required.\")\n\n self._raw_context = deque()\n self._summary_context = \"\"\n self._clean_transcript = \"\"\n self._logger = logger\n if not model_name:\n model_name = \"gpt-4-1106-preview\"\n self._model_name = model_name\n self._client = OpenAI(\n api_key=api_key,\n )\n self._oai_assistant_id = self.get_or_create_assistant(model_name)\n\n def get_or_create_assistant(self, model_name) -> str:\n \"\"\"Gets or creates an OpenAI assistant\"\"\"\n all_assistants = self._client.beta.assistants.list()\n for assistant in all_assistants.data:\n if assistant.name == _assistant_name and assistant.instructions == self._default_prompt:\n return assistant.id\n return self._client.beta.assistants.create(name=_assistant_name, description=\"Daily meeting summary assistant\",\n instructions=self._default_prompt,\n model=model_name).id\n\n def destroy(self):\n \"\"\"Destroys the assistant and relevant resources\"\"\"\n self._logger.info(\n \"Destroying thread (%s) and assistant (%s)\",\n self._oai_summary_thread_id,\n self._oai_assistant_id)\n bc = self._client.beta\n if self._oai_summary_thread_id:\n bc.threads.delete(self._oai_summary_thread_id)\n\n if self._oai_assistant_id:\n bc.assistants.delete(self._oai_assistant_id)\n\n def register_new_context(self, new_text: str, metadata: list[str] = None):\n \"\"\"Registers new context (usually a transcription line).\"\"\"\n content = self._compile_ctx_content(new_text, metadata)\n user_msg = ChatCompletionUserMessageParam(content=content, role=\"user\")\n self._raw_context.append(user_msg)\n\n def get_clean_transcript(self) -> str:\n \"\"\"Returns latest clean transcript.\"\"\"\n return self._clean_transcript\n\n async def cleanup_transcript(self) -> str:\n \"\"\"Cleans up transcript from raw context.\"\"\"\n if self._clean_transcript_running:\n raise Exception(\"Clean transcript process already running\")\n\n # Set this bool to ensure only one cleanup process\n # is running at a time.\n self._clean_transcript_running = True\n\n if len(self._raw_context) == 0:\n self._clean_transcript_running = False\n raise NoContextError()\n\n if self._oai_summary_thread_id:\n active_runs = self._client.beta.threads.runs.list(\n self._oai_summary_thread_id)\n if len(active_runs.data) > 0:\n self._clean_transcript_running = False\n active_statuses = [\"in-progress\"]\n for run in active_runs.data:\n if run.status in active_statuses:\n self._logger.info(\n \"Active run, won't clean transcript: %s (%s)\", run, run.status)\n return\n\n # How many transcript lines to process\n to_fetch = self._transcript_batch_size\n\n to_process = []\n ctx = self._raw_context\n\n # Fetch the next batch of transcript lines\n while to_fetch > 0 and ctx:\n next_line = ctx.popleft()\n to_process.append(next_line)\n # If we're at the end of the batch size but did not\n # get what appears to be a full sentence, just keep going.\n if to_fetch == 1 and \".\" not in next_line.content:\n continue\n to_fetch -= 1\n\n messages = to_process + [self._default_transcript_prompt]\n try:\n loop = asyncio.get_event_loop()\n future = loop.run_in_executor(\n None, self._make_openai_request, messages)\n res = await future\n self._clean_transcript += f\"\\n\\n{res}\"\n\n # Create a new OpenAI summary thread if it does not yet exist.\n if not self._oai_summary_thread_id:\n 
self._create_summary_thread()\n\n # Append new message with this batch of cleaned-up transcript to\n # thread\n self._client.beta.threads.messages.create(\n self._oai_summary_thread_id, content=res, role=\"user\")\n self._clean_transcript_running = False\n except Exception as e:\n # Re-insert failed items into the queue,\n # to make sure they do not get lost on next attempt.\n for item in reversed(to_process):\n self._raw_context.appendleft(item)\n self._clean_transcript_running = False\n raise Exception(f\"Failed to query OpenAI: {e}\") from e\n\n def _create_summary_thread(self):\n \"\"\"Creates a new OpenAI thread to store the summary context in\"\"\"\n thread = self._client.beta.threads.create()\n self._oai_summary_thread_id = thread.id\n\n async def query(self, custom_query: str = None) -> str:\n \"\"\"Submits a query to OpenAI with the stored context if one is provided.\n If a query is not provided, uses the default.\"\"\"\n if not self._oai_summary_thread_id:\n raise NoContextError()\n\n try:\n loop = asyncio.get_event_loop()\n future: asyncio.Future = None\n if not custom_query:\n future = loop.run_in_executor(\n None, self._make_openai_thread_request, self._oai_summary_thread_id)\n else:\n future = loop.run_in_executor(\n None, self._make_openai_request, [\n ChatCompletionUserMessageParam(\n content=self._clean_transcript, role=\"user\"),\n ChatCompletionSystemMessageParam(content=custom_query, role=\"system\")])\n res = await future\n return res\n except Exception as e:\n if \"No assistant found\" in str(e):\n self._oai_assistant_id = self.get_or_create_assistant(self._model_name)\n return await self.query(custom_query)\n raise Exception(f\"Failed to query OpenAI thread: {e}\") from e\n\n def _compile_ctx_content(self, new_text: str,\n metadata: list[str] = None) -> str:\n \"\"\"Compiles context content from the provided text and metadata.\"\"\"\n content = \"\"\n if metadata:\n content += f\"[{' | '.join(metadata)}] \"\n content += new_text\n return content\n\n def _make_openai_request(\n self, messages: list[ChatCompletionMessageParam]) -> str:\n \"\"\"Makes a chat completion request to OpenAI and returns the response.\"\"\"\n res = self._client.chat.completions.create(\n model=self._model_name,\n messages=messages,\n temperature=0,\n )\n\n for choice in res.choices:\n reason = choice.finish_reason\n if reason == \"stop\" or reason == \"length\":\n answer = choice.message.content\n return answer\n raise Exception(\n \"No usable choice found in OpenAI response: %s\",\n res.choices)\n\n def _make_openai_thread_request(\n self, thread_id: list) -> str:\n \"\"\"Creates a thread run and returns the response.\"\"\"\n\n threads = self._client.beta.threads\n run = threads.runs.create(\n assistant_id=self._oai_assistant_id,\n thread_id=thread_id,\n )\n while run.status != \"completed\":\n run = threads.runs.retrieve(\n thread_id=thread_id,\n run_id=run.id\n )\n\n messages = threads.messages.list(\n thread_id=thread_id,\n )\n msg_data = messages.data[0]\n answer = msg_data.content[0].text.value\n return answer"
},
{
"identifier": "Assistant",
"path": "server/llm/assistant.py",
"snippet": "class Assistant(ABC):\n \"\"\"Abstract class defining methods that should be implemented by any assistant\"\"\"\n\n @abstractmethod\n def register_new_context(self, new_text: str,\n name: list[str] = None) -> str:\n \"\"\"Registers new context (usually a transcription line).\"\"\"\n\n @abstractmethod\n async def query(self, custom_query: str) -> str:\n \"\"\"Runs a query against the assistant and returns the answer.\"\"\"\n\n @abstractmethod\n def get_clean_transcript(self) -> str:\n \"\"\"Returns latest clean transcript.\"\"\"\n\n @abstractmethod\n async def cleanup_transcript(self) -> str:\n \"\"\"Cleans up transcript from raw context.\"\"\"\n\n @abstractmethod\n def destroy(self) -> str:\n \"\"\"Destroys the assistant.\"\"\""
},
{
"identifier": "NoContextError",
"path": "server/llm/assistant.py",
"snippet": "class NoContextError(Exception):\n \"\"\"Raised when a query is made but no context is available\"\"\"\n\n def __init__(self):\n m = \"No context available.\"\n super().__init__(m)"
}
] | import asyncio
import atexit
import dataclasses
import json
import logging
import os.path
import sys
import threading
import time
from asyncio import Future
from datetime import datetime
from logging import Logger
from typing import Mapping, Any
from urllib.parse import urlparse
from daily import Daily, EventHandler, CallClient
from server.config import BotConfig, get_headless_config
from server.llm.openai_assistant import OpenAIAssistant
from server.llm.assistant import Assistant, NoContextError | 3,626 | """Class representing a single meeting happening within a Daily room.
This is responsible for all Daily operations."""
from __future__ import annotations
@dataclasses.dataclass
class Room:
"""Class representing a Daily video call room"""
url: str = None
token: str = None
name: str = None
@dataclasses.dataclass
class Summary:
"""Class representing a Daily meeting summary"""
content: str
    retrieved_at: float  # timestamp from time.time()
class Session(EventHandler):
"""Class representing a single meeting happening within a Daily room."""
| """Class representing a single meeting happening within a Daily room.
This is responsible for all Daily operations."""
from __future__ import annotations
@dataclasses.dataclass
class Room:
"""Class representing a Daily video call room"""
url: str = None
token: str = None
name: str = None
@dataclasses.dataclass
class Summary:
"""Class representing a Daily meeting summary"""
content: str
    retrieved_at: float  # timestamp from time.time()
class Session(EventHandler):
"""Class representing a single meeting happening within a Daily room."""
| _config: BotConfig | 0 | 2023-11-02 11:17:16+00:00 | 8k |
tiendatnguyen-vision/Orbit-symmetrize | RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py | [
{
"identifier": "LinearOperator",
"path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py",
"snippet": "class LinearOperator(nn.Module):\n \"\"\" Common interface for performing matrix vector products\n Many iterative methods (e.g. cg, gmres) do not need to know the\n individual entries of a matrix to solve a linear system A*x=b.\n Such solvers only require the computation of matrix vector\n products, A*v where v is a dense vector. This class serves as\n an abstract interface between iterative solvers and matrix-like\n objects.\n To construct a concrete LinearOperator, either pass appropriate\n callables to the constructor of this class, or subclass it.\n A subclass must implement either one of the methods ``_matvec``\n and ``_matmat``, and the attributes/properties ``shape`` (pair of\n integers) and ``dtype`` (may be None). It may call the ``__init__``\n on this class to have these attributes validated. Implementing\n ``_matvec`` automatically implements ``_matmat`` (using a naive\n algorithm) and vice-versa.\n Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``\n to implement the Hermitian adjoint (conjugate transpose). As with\n ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or\n ``_adjoint`` implements the other automatically. Implementing\n ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for\n backwards compatibility.\n Parameters\n ----------\n shape : tuple\n Matrix dimensions (M, N).\n matvec : callable f(v)\n Returns returns A * v.\n rmatvec : callable f(v)\n Returns A^H * v, where A^H is the conjugate transpose of A.\n matmat : callable f(V)\n Returns A * V, where V is a dense matrix with dimensions (N, K).\n dtype : dtype\n Data type of the matrix.\n rmatmat : callable f(V)\n Returns A^H * V, where V is a dense matrix with dimensions (M, K).\n Attributes\n ----------\n args : tuple\n For linear operators describing products etc. of other linear\n operators, the operands of the binary operation.\n ndim : int\n Number of dimensions (this is always 2)\n See Also\n --------\n aslinearoperator : Construct LinearOperators\n Notes\n -----\n The user-defined matvec() function must properly handle the case\n where v has shape (N,) as well as the (N,1) case. The shape of\n the return type is handled internally by LinearOperator.\n LinearOperator instances can also be multiplied, added with each\n other and exponentiated, all lazily: the result of these operations\n is always a new, composite LinearOperator, that defers linear\n operations to the original operators and combines the results.\n More details regarding how to subclass a LinearOperator and several\n examples of concrete LinearOperator instances can be found in the\n external project `PyLops <https://pylops.readthedocs.io>`_.\n Examples\n --------\n >>> def mv(v):\n ... 
return torch.tensor([2*v[0], 3*v[1]])\n ...\n >>> A = LinearOperator((2,2), matvec=mv)\n >>> A\n <2x2 _CustomLinearOperator with dtype=float64>\n >>> A.matvec(torch.ones(2))\n tensor([ 2., 3.])\n >>> A * torch.ones(2)\n tensor([ 2., 3.])\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls is LinearOperator:\n # Operate as _CustomLinearOperator factory.\n return super(LinearOperator, cls).__new__(_CustomLinearOperator)\n\n obj = super(LinearOperator, cls).__new__(cls)\n if (type(obj)._matvec == LinearOperator._matvec\n and type(obj)._matmat == LinearOperator._matmat):\n warnings.warn(\"LinearOperator subclass should implement\"\n \" at least one of _matvec and _matmat.\",\n category=RuntimeWarning, stacklevel=2)\n return obj\n\n def __init__(self):\n super().__init__()\n self.ndim = 2\n self.dtype = None\n self.shape = None\n self.device = None\n\n def init(self, dtype, shape, device):\n \"\"\" Initialize this LinearOperator.\n To be called by subclasses. ``dtype`` may be None; ``shape`` should\n be convertible to a length-2 tuple.\n Called from subclasses at the end of the __init__ routine.\n \"\"\"\n if dtype is None:\n dtype = torch.float # force float 32\n else:\n if not isinstance(dtype, torch.dtype):\n dtype = torch_dtype(dtype)\n\n shape = tuple(shape)\n if not isshape(shape):\n raise ValueError(f\"invalid shape {(shape,)} (must be 2-d)\")\n\n self.dtype = dtype\n self.shape = torch.Size(shape)\n self.device = torch_device(device)\n\n def size(self, dim=None):\n \"\"\" Return the size of this LinearOperator.\n This is a synonym for ``shape``.\n \"\"\"\n return self.shape if dim is None else self.shape[dim]\n\n def _matmat(self, V):\n \"\"\" Default matrix-matrix multiplication handler.\n Falls back on the user-defined _matvec method, so defining that will\n define matrix multiplication (though in a very suboptimal way).\n \"\"\"\n return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])\n\n def _matvec(self, v):\n \"\"\" Default matrix-vector multiplication handler.\n If self is a linear operator of shape (M, N), then this method will\n be called on a shape (N,) or (N, 1) ndarray, and should return a\n shape (M,) or (M, 1) ndarray.\n This default implementation falls back on _matmat, so defining that\n will define matrix-vector multiplication as well.\n \"\"\"\n return self.matmat(v.reshape(-1, 1))\n\n def matvec(self, v):\n \"\"\" Matrix-vector multiplication.\n Performs the operation y=A*v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (N,) or (N,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (M,) or (M,1) depending\n on the type and shape of the x argument.\n Notes\n -----\n This matvec wraps the user-specified matvec routine or overridden\n _matvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n if v.shape != (N,) and v.shape != (N, 1):\n raise ValueError('dimension mismatch')\n\n y = self._matvec(v)\n\n if v.ndim == 1:\n y = y.reshape(M)\n elif v.ndim == 2:\n y = y.reshape(M, 1)\n else:\n raise ValueError('invalid shape returned by user-defined matvec()')\n\n return y\n\n def rmatvec(self, v):\n \"\"\" Adjoint matrix-vector multiplication.\n Performs the operation y = A^H * v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (M,) or (M,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or 
ndarray with shape (N,) or (N,1) depending\n on the type and shape of the v argument.\n Notes\n -----\n This rmatvec wraps the user-specified rmatvec routine or overridden\n _rmatvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n\n if v.shape != (M,) and v.shape != (M, 1):\n raise ValueError('dimension mismatch')\n\n y = self._rmatvec(v)\n\n if v.ndim == 1:\n y = y.reshape(N)\n elif v.ndim == 2:\n y = y.reshape(N, 1)\n else:\n raise ValueError('invalid shape returned by user-defined rmatvec()')\n\n return y\n\n def _rmatvec(self, v):\n \"\"\" Default implementation of _rmatvec; defers to adjoint. \"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n # _adjoint not overridden, prevent infinite recursion\n raise NotImplementedError\n return self.H().matvec(v)\n\n def matmat(self, V):\n \"\"\" Matrix-matrix multiplication.\n Performs the operation y=A*V where A is an MxN linear\n operator and V dense N*K matrix or ndarray.\n Parameters\n ----------\n V : {matrix, ndarray}\n An array with shape (N,K).\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or ndarray with shape (M,K) depending on\n the type of the V argument.\n Notes\n -----\n This matmat wraps any user-specified matmat routine or overridden\n _matmat method to ensure that y has the correct type.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(1):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._matmat(V)\n return Y\n\n def rmatmat(self, V):\n \"\"\" Adjoint matrix-matrix multiplication.\n Performs the operation y = A^H * V where A is an MxN linear\n operator and V is a column vector or 1-d array, or 2-d array.\n The default implementation defers to the adjoint.\n Parameters\n ----------\n V : {matrix, ndarray}\n A matrix or 2D array.\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or 2D array depending on the type of the input.\n Notes\n -----\n This rmatmat wraps the user-specified rmatmat routine.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(0):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._rmatmat(V)\n return Y\n\n def _rmatmat(self, V):\n \"\"\" Default implementation of _rmatmat defers to rmatvec or adjoint. \"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])\n return self.H().matmat(V)\n\n def forward(self, v):\n \"\"\" Matrix-vector or matrix-matrix multiplication. 
\"\"\"\n return self*v\n\n def __mul__(self, v):\n return self.dot(v)\n\n def dot(self, v):\n \"\"\" Matrix-matrix or matrix-vector multiplication.\n Parameters\n ----------\n v : array_like\n 1-d or 2-d array, representing a vector or matrix.\n Returns\n -------\n Av : array\n 1-d or 2-d array (depending on the shape of x) that represents\n the result of applying this linear operator on x.\n \"\"\"\n if isinstance(v, LinearOperator):\n return _ProductLinearOperator(self, v)\n if torch.is_tensor(v):\n if v.ndim == 0:\n return _ScaledLinearOperator(self, v)\n if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:\n return self.matvec(v)\n if v.ndim == 2:\n return self.matmat(v)\n raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')\n\n def __matmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__mul__(other)\n\n def __rmatmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__rmul__(other)\n\n def __rmul__(self, x):\n if isscalar(x):\n return _ScaledLinearOperator(self, x)\n return NotImplemented\n\n def __pow__(self, p):\n if isscalar(p):\n return _PowerLinearOperator(self, p)\n return NotImplemented\n\n def __add__(self, x):\n if isinstance(x, LinearOperator):\n return _SumLinearOperator(self, x)\n if torch.is_tensor(x) and x.ndim == 2:\n return _SumLinearOperator(self, Lazy(x))\n return NotImplemented\n\n def __radd__(self, x):\n return self.__add__(x)\n\n def __neg__(self):\n return _ScaledLinearOperator(self, -1)\n\n def __sub__(self, x):\n return self.__add__(-x)\n\n def __repr__(self):\n M, N = self.shape\n if self.dtype is None:\n dtype = 'unspecified dtype'\n else:\n dtype = 'dtype=' + str(self.dtype)\n\n return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'\n\n def adjoint(self):\n \"\"\" Hermitian adjoint.\n Returns the Hermitian adjoint of self, aka the Hermitian\n conjugate or Hermitian transpose. For a complex matrix, the\n Hermitian adjoint is equal to the conjugate transpose.\n Can be abbreviated self.H instead of self.adjoint().\n Returns\n -------\n A_H : LinearOperator\n Hermitian adjoint of self.\n \"\"\"\n return self._adjoint()\n\n def H(self):\n \"\"\" Hermitian adjoint. \"\"\"\n return self.adjoint()\n\n def transpose(self):\n \"\"\" Transpose this linear operator.\n Returns a LinearOperator that represents the transpose of this one.\n Can be abbreviated self.T instead of self.transpose().\n \"\"\"\n return self._transpose()\n\n def t(self):\n \"\"\" Transpose this linear operator. \"\"\"\n return self.transpose()\n\n def _adjoint(self):\n \"\"\" Default implementation of _adjoint; defers to rmatvec. \"\"\"\n return _AdjointLinearOperator(self)\n\n def _transpose(self):\n \"\"\" Default implementation of _transpose; defers to rmatvec + conj\"\"\"\n return _TransposedLinearOperator(self)\n\n def invt(self):\n \"\"\" Default implementation of inverse transpose; defers to inv + T \"\"\"\n return (self ** -1).transpose()\n\n def to_dense(self):\n \"\"\" Default implementation of to_dense which produces the dense\n matrix corresponding to the given lazy matrix. Defaults to\n multiplying by the identity \"\"\"\n return [email protected](self.size(-1), device=self.device)\n\n def to(self, device):\n \"\"\" Move this linear operator to a new device. \"\"\"\n self.device = torch.empty(0).to(device).device\n return self"
},
{
"identifier": "Lazy",
"path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py",
"snippet": "class Lazy(LinearOperator):\n \"\"\" Linear operator with lazy evaluation \"\"\"\n def __init__(self, dense_matrix):\n super().__init__()\n self.A = dense_matrix\n self.init(self.A.dtype, self.A.shape, self.A.device)\n\n def _matmat(self, V):\n A, V = device_cast(self.A, V)\n A, V = dtype_cast(A, V)\n return A@V\n\n def _matvec(self, v):\n A, v = device_cast(self.A, v)\n A, v = dtype_cast(A, v)\n return A@v\n\n def _rmatmat(self, V):\n A, V = device_cast(self.A, V)\n A, V = dtype_cast(A, V)\n return A.t()@V\n\n def _rmatvec(self, v):\n A, v = device_cast(self.A, v)\n A, v = dtype_cast(A, v)\n return A.t()@v\n\n def to_dense(self):\n return self.A\n\n def invt(self):\n return Lazy(torch.linalg.inv(self.A).t())\n\n def to(self, device):\n self.A = self.A.to(device)\n self.device = self.A.device\n return self"
},
{
"identifier": "dtype_cast",
"path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def dtype_cast(A, B):\n \"\"\" Casts A and B to the same dtype, preferring complex dtypes over real dtypes. \"\"\"\n if A.dtype in (torch.complex64, torch.complex128):\n B = B.to(A.dtype)\n if B.dtype in (torch.complex64, torch.complex128):\n A = A.to(B.dtype)\n return A, B"
},
{
"identifier": "device_cast",
"path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def device_cast(A, B):\n \"\"\" Casts A and B to the same device, preferring GPU over CPU. \"\"\"\n if A.device.type == 'cuda':\n B = B.to(A.device)\n if B.device.type == 'cuda':\n A = A.to(B.device)\n return A, B"
},
{
"identifier": "get_device",
"path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def get_device(operators, devices=None):\n \"\"\" Returns the device of the first operator that has a device attribute. \"\"\"\n if devices is None:\n devices = []\n for obj in operators:\n if obj is not None and hasattr(obj, 'device') and obj.device.type != 'cpu':\n return obj.device\n return torch.device('cpu')"
}
] | from functools import reduce
from .linear_operator_base import LinearOperator, Lazy
from .utils import dtype_cast, device_cast, get_device
import torch | 4,554 | """ Abstract linear algebra library. """
def product(c):
""" Product of a list of numbers. """
return reduce(lambda a, b: a*b, c)
def lazify(x):
""" Convert a tensor LinearOperator. """
if isinstance(x, LinearOperator):
return x
if torch.is_tensor(x):
| """ Abstract linear algebra library. """
def product(c):
""" Product of a list of numbers. """
return reduce(lambda a, b: a*b, c)
def lazify(x):
""" Convert a tensor LinearOperator. """
if isinstance(x, LinearOperator):
return x
if torch.is_tensor(x): | return Lazy(x) | 1 | 2023-11-01 07:19:02+00:00 | 8k |
xenxxxx/BitPay-Crypto-Signal-Trading-Bot | tests/test_freqtradebot.py | [
{
"identifier": "EXMS",
"path": "tests/conftest.py",
"snippet": "EXMS = 'freqtrade.exchange.exchange.Exchange'"
},
{
"identifier": "create_mock_trades",
"path": "tests/conftest.py",
"snippet": "def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):\n \"\"\"\n Create some fake trades ...\n :param is_short: Optional bool, None creates a mix of long and short trades.\n \"\"\"\n def add_trade(trade):\n if use_db:\n Trade.session.add(trade)\n else:\n LocalTrade.add_bt_trade(trade)\n is_short1 = is_short if is_short is not None else True\n is_short2 = is_short if is_short is not None else False\n # Simulate dry_run entries\n trade = mock_trade_1(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_2(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_3(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_4(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_5(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_6(fee, is_short1)\n add_trade(trade)\n\n if use_db:\n Trade.commit()"
},
{
"identifier": "create_mock_trades_usdt",
"path": "tests/conftest.py",
"snippet": "def create_mock_trades_usdt(fee, is_short: Optional[bool] = False, use_db: bool = True):\n \"\"\"\n Create some fake trades ...\n \"\"\"\n def add_trade(trade):\n if use_db:\n Trade.session.add(trade)\n else:\n LocalTrade.add_bt_trade(trade)\n\n is_short1 = is_short if is_short is not None else True\n is_short2 = is_short if is_short is not None else False\n\n # Simulate dry_run entries\n trade = mock_trade_usdt_1(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_usdt_2(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_usdt_3(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_usdt_4(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_usdt_5(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_usdt_6(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_usdt_7(fee, is_short1)\n add_trade(trade)\n if use_db:\n Trade.commit()"
},
{
"identifier": "get_patched_freqtradebot",
"path": "tests/conftest.py",
"snippet": "def get_patched_freqtradebot(mocker, config) -> FreqtradeBot:\n \"\"\"\n This function patches _init_modules() to not call dependencies\n :param mocker: a Mocker object to apply patches\n :param config: Config to pass to the bot\n :return: FreqtradeBot\n \"\"\"\n patch_freqtradebot(mocker, config)\n return FreqtradeBot(config)"
},
{
"identifier": "get_patched_worker",
"path": "tests/conftest.py",
"snippet": "def get_patched_worker(mocker, config) -> Worker:\n \"\"\"\n This function patches _init_modules() to not call dependencies\n :param mocker: a Mocker object to apply patches\n :param config: Config to pass to the bot\n :return: Worker\n \"\"\"\n patch_freqtradebot(mocker, config)\n return Worker(args=None, config=config)"
},
{
"identifier": "log_has",
"path": "tests/conftest.py",
"snippet": "def log_has(line, logs):\n \"\"\"Check if line is found on some caplog's message.\"\"\"\n return any(line == message for message in logs.messages)"
},
{
"identifier": "log_has_re",
"path": "tests/conftest.py",
"snippet": "def log_has_re(line, logs):\n \"\"\"Check if line matches some caplog's message.\"\"\"\n return any(re.match(line, message) for message in logs.messages)"
},
{
"identifier": "patch_edge",
"path": "tests/conftest.py",
"snippet": "def patch_edge(mocker) -> None:\n # \"ETH/BTC\",\n # \"LTC/BTC\",\n # \"XRP/BTC\",\n # \"NEO/BTC\"\n\n mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock(\n return_value={\n 'NEO/BTC': PairInfo(-0.20, 0.66, 3.71, 0.50, 1.71, 10, 25),\n 'LTC/BTC': PairInfo(-0.21, 0.66, 3.71, 0.50, 1.71, 11, 20),\n }\n ))\n mocker.patch('freqtrade.edge.Edge.calculate', MagicMock(return_value=True))"
},
{
"identifier": "patch_exchange",
"path": "tests/conftest.py",
"snippet": "def patch_exchange(\n mocker,\n api_mock=None,\n id='binance',\n mock_markets=True,\n mock_supported_modes=True\n) -> None:\n mocker.patch(f'{EXMS}._load_async_markets', return_value={})\n mocker.patch(f'{EXMS}.validate_config', MagicMock())\n mocker.patch(f'{EXMS}.validate_timeframes', MagicMock())\n mocker.patch(f'{EXMS}.id', PropertyMock(return_value=id))\n mocker.patch(f'{EXMS}.name', PropertyMock(return_value=id.title()))\n mocker.patch(f'{EXMS}.precisionMode', PropertyMock(return_value=2))\n\n if mock_markets:\n if isinstance(mock_markets, bool):\n mock_markets = get_markets()\n mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=mock_markets))\n\n if mock_supported_modes:\n mocker.patch(\n f'freqtrade.exchange.{id}.{id.capitalize()}._supported_trading_mode_margin_pairs',\n PropertyMock(return_value=[\n (TradingMode.MARGIN, MarginMode.CROSS),\n (TradingMode.MARGIN, MarginMode.ISOLATED),\n (TradingMode.FUTURES, MarginMode.CROSS),\n (TradingMode.FUTURES, MarginMode.ISOLATED)\n ])\n )\n\n if api_mock:\n mocker.patch(f'{EXMS}._init_ccxt', return_value=api_mock)\n else:\n mocker.patch(f'{EXMS}._init_ccxt', MagicMock())\n mocker.patch(f'{EXMS}.timeframes', PropertyMock(\n return_value=['5m', '15m', '1h', '1d']))"
},
{
"identifier": "patch_get_signal",
"path": "tests/conftest.py",
"snippet": "def patch_get_signal(\n freqtrade: FreqtradeBot,\n enter_long=True,\n exit_long=False,\n enter_short=False,\n exit_short=False,\n enter_tag: Optional[str] = None,\n exit_tag: Optional[str] = None,\n) -> None:\n \"\"\"\n :param mocker: mocker to patch IStrategy class\n :return: None\n \"\"\"\n # returns (Signal-direction, signaname)\n def patched_get_entry_signal(*args, **kwargs):\n direction = None\n if enter_long and not any([exit_long, enter_short]):\n direction = SignalDirection.LONG\n if enter_short and not any([exit_short, enter_long]):\n direction = SignalDirection.SHORT\n\n return direction, enter_tag\n\n freqtrade.strategy.get_entry_signal = patched_get_entry_signal\n\n def patched_get_exit_signal(pair, timeframe, dataframe, is_short):\n if is_short:\n return enter_short, exit_short, exit_tag\n else:\n return enter_long, exit_long, exit_tag\n\n # returns (enter, exit)\n freqtrade.strategy.get_exit_signal = patched_get_exit_signal\n\n freqtrade.exchange.refresh_latest_ohlcv = lambda p: None"
},
{
"identifier": "patch_wallet",
"path": "tests/conftest.py",
"snippet": "def patch_wallet(mocker, free=999.9) -> None:\n mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(\n return_value=free\n ))"
},
{
"identifier": "patch_whitelist",
"path": "tests/conftest.py",
"snippet": "def patch_whitelist(mocker, conf) -> None:\n mocker.patch('freqtrade.freqtradebot.FreqtradeBot._refresh_active_whitelist',\n MagicMock(return_value=conf['exchange']['pair_whitelist']))"
},
{
"identifier": "MOCK_TRADE_COUNT",
"path": "tests/conftest_trades.py",
"snippet": "MOCK_TRADE_COUNT = 6"
},
{
"identifier": "entry_side",
"path": "tests/conftest_trades.py",
"snippet": "def entry_side(is_short: bool):\n return \"sell\" if is_short else \"buy\""
},
{
"identifier": "exit_side",
"path": "tests/conftest_trades.py",
"snippet": "def exit_side(is_short: bool):\n return \"buy\" if is_short else \"sell\""
},
{
"identifier": "mock_order_2",
"path": "tests/conftest_trades.py",
"snippet": "def mock_order_2(is_short: bool):\n return {\n 'id': f'1235_{direc(is_short)}',\n 'symbol': 'ETC/BTC',\n 'status': 'closed',\n 'side': entry_side(is_short),\n 'type': 'limit',\n 'price': 0.123,\n 'amount': 123.0,\n 'filled': 123.0,\n 'cost': 15.129,\n 'remaining': 0.0,\n }"
},
{
"identifier": "mock_order_2_sell",
"path": "tests/conftest_trades.py",
"snippet": "def mock_order_2_sell(is_short: bool):\n return {\n 'id': f'12366_{direc(is_short)}',\n 'symbol': 'ETC/BTC',\n 'status': 'closed',\n 'side': exit_side(is_short),\n 'type': 'limit',\n 'price': 0.128,\n 'amount': 123.0,\n 'filled': 123.0,\n 'cost': 15.129,\n 'remaining': 0.0,\n }"
},
{
"identifier": "mock_order_3",
"path": "tests/conftest_trades.py",
"snippet": "def mock_order_3(is_short: bool):\n return {\n 'id': f'41231a12a_{direc(is_short)}',\n 'symbol': 'XRP/BTC',\n 'status': 'closed',\n 'side': entry_side(is_short),\n 'type': 'limit',\n 'price': 0.05,\n 'amount': 123.0,\n 'filled': 123.0,\n 'cost': 15.129,\n 'remaining': 0.0,\n }"
},
{
"identifier": "mock_order_3_sell",
"path": "tests/conftest_trades.py",
"snippet": "def mock_order_3_sell(is_short: bool):\n return {\n 'id': f'41231a666a_{direc(is_short)}',\n 'symbol': 'XRP/BTC',\n 'status': 'closed',\n 'side': exit_side(is_short),\n 'type': 'stop_loss_limit',\n 'price': 0.06,\n 'average': 0.06,\n 'amount': 123.0,\n 'filled': 123.0,\n 'cost': 15.129,\n 'remaining': 0.0,\n }"
},
{
"identifier": "mock_order_4",
"path": "tests/conftest_trades.py",
"snippet": "def mock_order_4(is_short: bool):\n return {\n 'id': f'prod_buy_{direc(is_short)}_12345',\n 'symbol': 'ETC/BTC',\n 'status': 'open',\n 'side': entry_side(is_short),\n 'type': 'limit',\n 'price': 0.123,\n 'amount': 123.0,\n 'filled': 0.0,\n 'cost': 15.129,\n 'remaining': 123.0,\n }"
},
{
"identifier": "mock_order_5_stoploss",
"path": "tests/conftest_trades.py",
"snippet": "def mock_order_5_stoploss(is_short: bool):\n return {\n 'id': f'prod_stoploss_{direc(is_short)}_3455',\n 'symbol': 'XRP/BTC',\n 'status': 'open',\n 'side': exit_side(is_short),\n 'type': 'stop_loss_limit',\n 'price': 0.123,\n 'amount': 123.0,\n 'filled': 0.0,\n 'cost': 0.0,\n 'remaining': 123.0,\n }"
},
{
"identifier": "mock_order_6_sell",
"path": "tests/conftest_trades.py",
"snippet": "def mock_order_6_sell(is_short: bool):\n return {\n 'id': f'prod_sell_{direc(is_short)}_6',\n 'symbol': 'LTC/BTC',\n 'status': 'open',\n 'side': exit_side(is_short),\n 'type': 'limit',\n 'price': 0.15 if is_short else 0.20,\n 'amount': 2.0,\n 'filled': 0.0,\n 'cost': 0.0,\n 'remaining': 2.0,\n }"
},
{
"identifier": "mock_trade_usdt_4",
"path": "tests/conftest_trades_usdt.py",
"snippet": "def mock_trade_usdt_4(fee, is_short: bool):\n \"\"\"\n Simulate prod entry\n \"\"\"\n trade = Trade(\n pair='NEO/USDT',\n stake_amount=20.0,\n amount=10.0,\n amount_requested=10.01,\n fee_open=fee.return_value,\n fee_close=fee.return_value,\n open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=14),\n is_open=True,\n open_rate=2.0,\n exchange='binance',\n strategy='StrategyTestV2',\n timeframe=5,\n is_short=is_short,\n )\n o = Order.parse_from_ccxt_object(mock_order_usdt_4(is_short), 'NEO/USDT', entry_side(is_short))\n trade.orders.append(o)\n return trade"
}
] | import logging
import time
import pytest
from copy import deepcopy
from datetime import timedelta
from typing import List
from unittest.mock import ANY, MagicMock, PropertyMock, patch
from pandas import DataFrame
from sqlalchemy import select
from freqtrade.constants import CANCEL_REASON, UNLIMITED_STAKE_AMOUNT
from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, RPCMessageType, RunMode,
SignalDirection, State)
from freqtrade.exceptions import (DependencyException, ExchangeError, InsufficientFundsError,
InvalidOrderException, OperationalException, PricingError,
TemporaryError)
from freqtrade.freqtradebot import FreqtradeBot
from freqtrade.persistence import Order, PairLocks, Trade
from freqtrade.persistence.models import PairLock
from freqtrade.plugins.protections.iprotection import ProtectionReturn
from freqtrade.util.datetime_helpers import dt_now, dt_utc
from freqtrade.worker import Worker
from tests.conftest import (EXMS, create_mock_trades, create_mock_trades_usdt,
get_patched_freqtradebot, get_patched_worker, log_has, log_has_re,
patch_edge, patch_exchange, patch_get_signal, patch_wallet,
patch_whitelist)
from tests.conftest_trades import (MOCK_TRADE_COUNT, entry_side, exit_side, mock_order_2,
mock_order_2_sell, mock_order_3, mock_order_3_sell, mock_order_4,
mock_order_5_stoploss, mock_order_6_sell)
from tests.conftest_trades_usdt import mock_trade_usdt_4 | 6,431 | (False, 1, 1),
(True, 1, 1),
])
def test_cancel_all_open_orders(mocker, default_conf_usdt, fee, limit_order, limit_order_open,
is_short, buy_calls, sell_calls):
default_conf_usdt['cancel_open_orders_on_exit'] = True
mocker.patch(
f'{EXMS}.fetch_order',
side_effect=[
ExchangeError(),
limit_order[exit_side(is_short)],
limit_order_open[entry_side(is_short)],
limit_order_open[exit_side(is_short)],
]
)
buy_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_enter')
sell_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit')
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades(fee, is_short=is_short)
trades = Trade.session.scalars(select(Trade)).all()
assert len(trades) == MOCK_TRADE_COUNT
freqtrade.cancel_all_open_orders()
assert buy_mock.call_count == buy_calls
assert sell_mock.call_count == sell_calls
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_check_for_open_trades(mocker, default_conf_usdt, fee, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.check_for_open_trades()
assert freqtrade.rpc.send_msg.call_count == 0
create_mock_trades(fee, is_short)
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
freqtrade.check_for_open_trades()
assert freqtrade.rpc.send_msg.call_count == 1
assert 'Handle these trades manually' in freqtrade.rpc.send_msg.call_args[0][0]['status']
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.usefixtures("init_persistence")
def test_startup_update_open_orders(mocker, default_conf_usdt, fee, caplog, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades(fee, is_short=is_short)
freqtrade.startup_update_open_orders()
assert not log_has_re(r"Error updating Order .*", caplog)
caplog.clear()
freqtrade.config['dry_run'] = False
freqtrade.startup_update_open_orders()
assert len(Order.get_open_orders()) == 4
matching_buy_order = mock_order_4(is_short=is_short)
matching_buy_order.update({
'status': 'closed',
})
mocker.patch(f'{EXMS}.fetch_order', return_value=matching_buy_order)
freqtrade.startup_update_open_orders()
# Only stoploss and sell orders are kept open
assert len(Order.get_open_orders()) == 3
caplog.clear()
mocker.patch(f'{EXMS}.fetch_order', side_effect=ExchangeError)
freqtrade.startup_update_open_orders()
assert log_has_re(r"Error updating Order .*", caplog)
mocker.patch(f'{EXMS}.fetch_order', side_effect=InvalidOrderException)
hto_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_order')
# Orders which are no longer found after X days should be assumed to be canceled.
freqtrade.startup_update_open_orders()
assert log_has_re(r"Order is older than \d days.*", caplog)
assert hto_mock.call_count == 3
assert hto_mock.call_args_list[0][0][0]['status'] == 'canceled'
assert hto_mock.call_args_list[1][0][0]['status'] == 'canceled'
@pytest.mark.usefixtures("init_persistence")
def test_startup_backpopulate_precision(mocker, default_conf_usdt, fee, caplog):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades_usdt(fee)
trades = Trade.get_trades().all()
trades[-1].exchange = 'some_other_exchange'
for trade in trades:
assert trade.price_precision is None
assert trade.amount_precision is None
assert trade.precision_mode is None
freqtrade.startup_backpopulate_precision()
trades = Trade.get_trades().all()
for trade in trades:
if trade.exchange == 'some_other_exchange':
assert trade.price_precision is None
assert trade.amount_precision is None
assert trade.precision_mode is None
else:
assert trade.price_precision is not None
assert trade.amount_precision is not None
assert trade.precision_mode is not None
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
def patch_with_fee(order):
order.update({'fee': {'cost': 0.1, 'rate': 0.01,
'currency': order['symbol'].split('/')[0]}})
return order
mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order',
side_effect=[
| # pragma pylint: disable=missing-docstring, C0103
# pragma pylint: disable=protected-access, too-many-lines, invalid-name, too-many-arguments
def patch_RPCManager(mocker) -> MagicMock:
"""
This function mocks the RPC manager to avoid repeating this code in almost every test
:param mocker: mocker to patch RPCManager class
:return: RPCManager.send_msg MagicMock to track if this method is called
"""
mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock())
rpc_mock = mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock())
return rpc_mock
# Unit tests
def test_freqtradebot_state(mocker, default_conf_usdt, markets) -> None:
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
assert freqtrade.state is State.RUNNING
default_conf_usdt.pop('initial_state')
freqtrade = FreqtradeBot(default_conf_usdt)
assert freqtrade.state is State.STOPPED
def test_process_stopped(mocker, default_conf_usdt) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
coo_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders')
freqtrade.process_stopped()
assert coo_mock.call_count == 0
default_conf_usdt['cancel_open_orders_on_exit'] = True
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.process_stopped()
assert coo_mock.call_count == 1
def test_process_calls_sendmsg(mocker, default_conf_usdt) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.process()
assert freqtrade.rpc.process_msg_queue.call_count == 1
def test_bot_cleanup(mocker, default_conf_usdt, caplog) -> None:
mock_cleanup = mocker.patch('freqtrade.freqtradebot.Trade.commit')
coo_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders')
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.cleanup()
assert log_has('Cleaning up modules ...', caplog)
assert mock_cleanup.call_count == 1
assert coo_mock.call_count == 0
freqtrade.config['cancel_open_orders_on_exit'] = True
freqtrade.cleanup()
assert coo_mock.call_count == 1
def test_bot_cleanup_db_errors(mocker, default_conf_usdt, caplog) -> None:
mocker.patch('freqtrade.freqtradebot.Trade.commit',
side_effect=OperationalException())
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.check_for_open_trades',
side_effect=OperationalException())
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.emc = MagicMock()
freqtrade.emc.shutdown = MagicMock()
freqtrade.cleanup()
assert freqtrade.emc.shutdown.call_count == 1
@pytest.mark.parametrize('runmode', [
RunMode.DRY_RUN,
RunMode.LIVE
])
def test_order_dict(default_conf_usdt, mocker, runmode, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
conf = default_conf_usdt.copy()
conf['runmode'] = runmode
conf['order_types'] = {
'entry': 'market',
'exit': 'limit',
'stoploss': 'limit',
'stoploss_on_exchange': True,
}
conf['entry_pricing']['price_side'] = 'ask'
freqtrade = FreqtradeBot(conf)
if runmode == RunMode.LIVE:
assert not log_has_re(r".*stoploss_on_exchange .* dry-run", caplog)
assert freqtrade.strategy.order_types['stoploss_on_exchange']
caplog.clear()
# stoploss_on_exchange=False is left untouched
conf = default_conf_usdt.copy()
conf['runmode'] = runmode
conf['order_types'] = {
'entry': 'market',
'exit': 'limit',
'stoploss': 'limit',
'stoploss_on_exchange': False,
}
freqtrade = FreqtradeBot(conf)
assert not freqtrade.strategy.order_types['stoploss_on_exchange']
assert not log_has_re(r".*stoploss_on_exchange .* dry-run", caplog)
def test_get_trade_stake_amount(default_conf_usdt, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
freqtrade = FreqtradeBot(default_conf_usdt)
result = freqtrade.wallets.get_trade_stake_amount('ETH/USDT')
assert result == default_conf_usdt['stake_amount']
@pytest.mark.parametrize('runmode', [
RunMode.DRY_RUN,
RunMode.LIVE
])
def test_load_strategy_no_keys(default_conf_usdt, mocker, runmode, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
conf = deepcopy(default_conf_usdt)
conf['runmode'] = runmode
erm = mocker.patch('freqtrade.freqtradebot.ExchangeResolver.load_exchange')
freqtrade = FreqtradeBot(conf)
strategy_config = freqtrade.strategy.config
assert id(strategy_config['exchange']) == id(conf['exchange'])
# Keys have been removed and are not passed to the exchange
assert strategy_config['exchange']['key'] == ''
assert strategy_config['exchange']['secret'] == ''
assert erm.call_count == 1
ex_conf = erm.call_args_list[0][1]['exchange_config']
assert id(ex_conf) != id(conf['exchange'])
# Keys are still present
assert ex_conf['key'] != ''
assert ex_conf['key'] == default_conf_usdt['exchange']['key']
assert ex_conf['secret'] != ''
assert ex_conf['secret'] == default_conf_usdt['exchange']['secret']
@pytest.mark.parametrize("amend_last,wallet,max_open,lsamr,expected", [
(False, 120, 2, 0.5, [60, None]),
(True, 120, 2, 0.5, [60, 58.8]),
(False, 180, 3, 0.5, [60, 60, None]),
(True, 180, 3, 0.5, [60, 60, 58.2]),
(False, 122, 3, 0.5, [60, 60, None]),
(True, 122, 3, 0.5, [60, 60, 0.0]),
(True, 167, 3, 0.5, [60, 60, 45.33]),
(True, 122, 3, 1, [60, 60, 0.0]),
])
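# Illustrative arithmetic for the expected stakes above (assuming the conftest
# default tradable_balance_ratio of 0.99): with wallet=120 and max_open=2 the
# tradable balance is 120 * 0.99 = 118.8, the first trade takes 60, and with
# amend_last_stake_amount=True the last stake is amended to 118.8 - 60 = 58.8.
# Likewise 180 * 0.99 - 120 = 58.2 and 167 * 0.99 - 120 = 45.33. When the
# remainder drops below last_stake_amount_min_ratio * stake (0.5 * 60 = 30),
# as with wallet=122 (120.78 - 120 = 0.78), the expected last stake is 0.0.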
def test_check_available_stake_amount(
default_conf_usdt, ticker_usdt, mocker, fee, limit_buy_order_usdt_open,
amend_last, wallet, max_open, lsamr, expected
) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_buy_order_usdt_open),
get_fee=fee
)
default_conf_usdt['dry_run_wallet'] = wallet
default_conf_usdt['amend_last_stake_amount'] = amend_last
default_conf_usdt['last_stake_amount_min_ratio'] = lsamr
freqtrade = FreqtradeBot(default_conf_usdt)
for i in range(0, max_open):
if expected[i] is not None:
limit_buy_order_usdt_open['id'] = str(i)
result = freqtrade.wallets.get_trade_stake_amount('ETH/USDT')
assert pytest.approx(result) == expected[i]
freqtrade.execute_entry('ETH/USDT', result)
else:
with pytest.raises(DependencyException):
freqtrade.wallets.get_trade_stake_amount('ETH/USDT')
def test_edge_called_in_process(mocker, edge_conf) -> None:
patch_RPCManager(mocker)
patch_edge(mocker)
patch_exchange(mocker)
freqtrade = FreqtradeBot(edge_conf)
patch_get_signal(freqtrade)
freqtrade.process()
assert freqtrade.active_pair_whitelist == ['NEO/BTC', 'LTC/BTC']
def test_edge_overrides_stake_amount(mocker, edge_conf) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
patch_edge(mocker)
edge_conf['dry_run_wallet'] = 999.9
freqtrade = FreqtradeBot(edge_conf)
assert freqtrade.wallets.get_trade_stake_amount(
'NEO/BTC', freqtrade.edge) == (999.9 * 0.5 * 0.01) / 0.20
assert freqtrade.wallets.get_trade_stake_amount(
'LTC/BTC', freqtrade.edge) == (999.9 * 0.5 * 0.01) / 0.21
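# Rough breakdown of the Edge sizing arithmetic asserted above (the 0.5 and 0.01
# factors presumably correspond to capital_available_percentage and allowed_risk
# from edge_conf, and 0.20 / 0.21 are the per-pair stoploss values from patch_edge):
#   stake = capital * capital_available_percentage * allowed_risk / stoploss
#   NEO/BTC: 999.9 * 0.5 * 0.01 / 0.20 = 24.9975
#   LTC/BTC: 999.9 * 0.5 * 0.01 / 0.21 ≈ 23.8071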
@pytest.mark.parametrize('buy_price_mult,ignore_strat_sl', [
(0.79, False), # Override stoploss
(0.85, True), # Override strategy stoploss
])
def test_edge_overrides_stoploss(limit_order, fee, caplog, mocker,
buy_price_mult, ignore_strat_sl, edge_conf) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
patch_edge(mocker)
edge_conf['max_open_trades'] = float('inf')
# Strategy stoploss is -0.1 but Edge imposes a stoploss at -0.2
# Thus, if price falls 21%, stoploss should be triggered
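# With buy_price_mult=0.79 the price drops 21%, which exceeds Edge's 20% stoploss,
# so the exit fires (ignore_strat_sl=False). With buy_price_mult=0.85 the 15% drop
# would only trip the strategy's -0.1 stoploss, which Edge overrides, so no exit is
# expected (ignore_strat_sl=True).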
#
# mocking the ticker: price is falling ...
enter_price = limit_order['buy']['price']
ticker_val = {
'bid': enter_price,
'ask': enter_price,
'last': enter_price,
}
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value=ticker_val),
get_fee=fee,
)
#############################################
# Create a trade with "limit_buy_order_usdt" price
freqtrade = FreqtradeBot(edge_conf)
freqtrade.active_pair_whitelist = ['NEO/BTC']
patch_get_signal(freqtrade)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
caplog.clear()
#############################################
ticker_val.update({
'bid': enter_price * buy_price_mult,
'ask': enter_price * buy_price_mult,
'last': enter_price * buy_price_mult,
})
# stoploss should be hit
assert freqtrade.handle_trade(trade) is not ignore_strat_sl
if not ignore_strat_sl:
assert log_has_re('Exit for NEO/BTC detected. Reason: stop_loss.*', caplog)
assert trade.exit_reason == ExitType.STOP_LOSS.value
# Test compatibility ...
assert trade.sell_reason == ExitType.STOP_LOSS.value
def test_total_open_trades_stakes(mocker, default_conf_usdt, ticker_usdt, fee) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
default_conf_usdt['max_open_trades'] = 2
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade is not None
assert trade.stake_amount == 60.0
assert trade.is_open
assert trade.open_date is not None
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade).order_by(Trade.id.desc())).first()
assert trade is not None
assert trade.stake_amount == 60.0
assert trade.is_open
assert trade.open_date is not None
assert Trade.total_open_trades_stakes() == 120.0
@pytest.mark.parametrize("is_short,open_rate", [
(False, 2.0),
(True, 2.2)
])
def test_create_trade(default_conf_usdt, ticker_usdt, limit_order,
fee, mocker, is_short, open_rate) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
# Save state of current whitelist
whitelist = deepcopy(default_conf_usdt['exchange']['pair_whitelist'])
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.create_trade('ETH/USDT')
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade is not None
assert pytest.approx(trade.stake_amount) == 60.0
assert trade.is_open
assert trade.open_date is not None
assert trade.exchange == 'binance'
# Simulate fulfilled LIMIT_BUY order for trade
oobj = Order.parse_from_ccxt_object(
limit_order[entry_side(is_short)], 'ADA/USDT', entry_side(is_short))
trade.update_trade(oobj)
assert trade.open_rate == open_rate
assert trade.amount == 30.0
assert whitelist == default_conf_usdt['exchange']['pair_whitelist']
def test_create_trade_no_stake_amount(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
patch_wallet(mocker, free=default_conf_usdt['stake_amount'] * 0.5)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
with pytest.raises(DependencyException, match=r'.*stake amount.*'):
freqtrade.create_trade('ETH/USDT')
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize('stake_amount,create,amount_enough,max_open_trades', [
(5.0, True, True, 99),
(0.042, True, False, 99), # Amount will be adjusted to min - which is 0.051
(0, False, True, 99),
(UNLIMITED_STAKE_AMOUNT, False, True, 0),
])
def test_create_trade_minimal_amount(
default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker,
stake_amount, create, amount_enough, max_open_trades, caplog, is_short
) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
enter_mock = MagicMock(return_value=limit_order_open[entry_side(is_short)])
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=enter_mock,
get_fee=fee,
)
default_conf_usdt['max_open_trades'] = max_open_trades
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.config['stake_amount'] = stake_amount
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
if create:
assert freqtrade.create_trade('ETH/USDT')
if amount_enough:
rate, amount = enter_mock.call_args[1]['rate'], enter_mock.call_args[1]['amount']
assert rate * amount <= default_conf_usdt['stake_amount']
else:
assert log_has_re(
r"Stake amount for pair .* is too small.*",
caplog
)
else:
assert not freqtrade.create_trade('ETH/USDT')
if not max_open_trades:
assert freqtrade.wallets.get_trade_stake_amount('ETH/USDT', freqtrade.edge) == 0
@pytest.mark.parametrize('whitelist,positions', [
(["ETH/USDT"], 1), # No pairs left
([], 0), # No pairs in whitelist
])
def test_enter_positions_no_pairs_left(default_conf_usdt, ticker_usdt, limit_buy_order_usdt_open,
fee, whitelist, positions, mocker, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_buy_order_usdt_open),
get_fee=fee,
)
default_conf_usdt['exchange']['pair_whitelist'] = whitelist
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
n = freqtrade.enter_positions()
assert n == positions
if positions:
assert not log_has_re(r"No currency pair in active pair whitelist.*", caplog)
n = freqtrade.enter_positions()
assert n == 0
assert log_has_re(r"No currency pair in active pair whitelist.*", caplog)
else:
assert n == 0
assert log_has("Active pair whitelist is empty.", caplog)
@pytest.mark.usefixtures("init_persistence")
def test_enter_positions_global_pairlock(default_conf_usdt, ticker_usdt, limit_buy_order_usdt, fee,
mocker, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value={'id': limit_buy_order_usdt['id']}),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
n = freqtrade.enter_positions()
message = r"Global pairlock active until.* Not creating new trades."
n = freqtrade.enter_positions()
# 0 trades, but it's not because of pairlock.
assert n == 0
assert not log_has_re(message, caplog)
caplog.clear()
PairLocks.lock_pair('*', dt_now() + timedelta(minutes=20), 'Just because', side='*')
n = freqtrade.enter_positions()
assert n == 0
assert log_has_re(message, caplog)
@pytest.mark.parametrize('is_short', [False, True])
def test_handle_protections(mocker, default_conf_usdt, fee, is_short):
default_conf_usdt['protections'] = [
{"method": "CooldownPeriod", "stop_duration": 60},
{
"method": "StoplossGuard",
"lookback_period_candles": 24,
"trade_limit": 4,
"stop_duration_candles": 4,
"only_per_pair": False
}
]
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.protections._protection_handlers[1].global_stop = MagicMock(
return_value=ProtectionReturn(True, dt_now() + timedelta(hours=1), "asdf"))
create_mock_trades(fee, is_short)
freqtrade.handle_protections('ETC/BTC', '*')
send_msg_mock = freqtrade.rpc.send_msg
assert send_msg_mock.call_count == 2
assert send_msg_mock.call_args_list[0][0][0]['type'] == RPCMessageType.PROTECTION_TRIGGER
assert send_msg_mock.call_args_list[1][0][0]['type'] == RPCMessageType.PROTECTION_TRIGGER_GLOBAL
def test_create_trade_no_signal(default_conf_usdt, fee, mocker) -> None:
default_conf_usdt['dry_run'] = True
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
get_fee=fee,
)
default_conf_usdt['stake_amount'] = 10
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_long=False, exit_long=False)
assert not freqtrade.create_trade('ETH/USDT')
@pytest.mark.parametrize("max_open", range(0, 5))
@pytest.mark.parametrize("tradable_balance_ratio,modifier", [(1.0, 1), (0.99, 0.8), (0.5, 0.5)])
def test_create_trades_multiple_trades(
default_conf_usdt, ticker_usdt, fee, mocker, limit_buy_order_usdt_open,
max_open, tradable_balance_ratio, modifier
) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
default_conf_usdt['max_open_trades'] = max_open
default_conf_usdt['tradable_balance_ratio'] = tradable_balance_ratio
default_conf_usdt['dry_run_wallet'] = 60.0 * max_open
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_buy_order_usdt_open),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
n = freqtrade.enter_positions()
trades = Trade.get_open_trades()
# Expected number of trades is max_open scaled by a modifier
# that depends on the configured tradable_balance_ratio
assert n == max(int(max_open * modifier), 0)
assert len(trades) == max(int(max_open * modifier), 0)
def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker,
limit_buy_order_usdt_open, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
default_conf_usdt['max_open_trades'] = 4
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_buy_order_usdt_open),
get_fee=fee,
get_funding_fees=MagicMock(side_effect=ExchangeError()),
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
# Create 2 existing trades
freqtrade.execute_entry('ETH/USDT', default_conf_usdt['stake_amount'])
freqtrade.execute_entry('NEO/BTC', default_conf_usdt['stake_amount'])
assert log_has("Could not find funding fee.", caplog)
assert len(Trade.get_open_trades()) == 2
# Change order_id for new orders
limit_buy_order_usdt_open['id'] = '123444'
# Create 2 new trades using create_trades
assert freqtrade.create_trade('ETH/USDT')
assert freqtrade.create_trade('NEO/BTC')
trades = Trade.get_open_trades()
assert len(trades) == 4
@pytest.mark.parametrize('is_short', [False, True])
def test_process_trade_creation(default_conf_usdt, ticker_usdt, limit_order, limit_order_open,
is_short, fee, mocker, caplog
) -> None:
ticker_side = 'ask' if is_short else 'bid'
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_order_open[entry_side(is_short)]),
fetch_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
trades = Trade.get_open_trades()
assert not trades
freqtrade.process()
trades = Trade.get_open_trades()
assert len(trades) == 1
trade = trades[0]
assert trade is not None
assert pytest.approx(trade.stake_amount) == default_conf_usdt['stake_amount']
assert trade.is_open
assert trade.open_date is not None
assert trade.exchange == 'binance'
assert trade.open_rate == ticker_usdt.return_value[ticker_side]
assert pytest.approx(trade.amount) == 60 / ticker_usdt.return_value[ticker_side]
assert log_has(
f'{"Short" if is_short else "Long"} signal found: about create a new trade for ETH/USDT '
'with stake_amount: 60.0 ...',
caplog
)
def test_process_exchange_failures(default_conf_usdt, ticker_usdt, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(side_effect=TemporaryError)
)
sleep_mock = mocker.patch('time.sleep', side_effect=lambda _: None)
worker = Worker(args=None, config=default_conf_usdt)
patch_get_signal(worker.freqtrade)
worker._process_running()
assert sleep_mock.called
def test_process_operational_exception(default_conf_usdt, ticker_usdt, mocker) -> None:
msg_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(side_effect=OperationalException)
)
worker = Worker(args=None, config=default_conf_usdt)
patch_get_signal(worker.freqtrade)
assert worker.freqtrade.state == State.RUNNING
worker._process_running()
assert worker.freqtrade.state == State.STOPPED
assert 'OperationalException' in msg_mock.call_args_list[-1][0][0]['status']
def test_process_trade_handling(default_conf_usdt, ticker_usdt, limit_buy_order_usdt_open, fee,
mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_buy_order_usdt_open),
fetch_order=MagicMock(return_value=limit_buy_order_usdt_open),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
trades = Trade.get_open_trades()
assert not trades
freqtrade.process()
trades = Trade.get_open_trades()
assert len(trades) == 1
# Nothing happened ...
freqtrade.process()
assert len(trades) == 1
def test_process_trade_no_whitelist_pair(default_conf_usdt, ticker_usdt, limit_buy_order_usdt,
fee, mocker) -> None:
""" Test process with trade not in pair list """
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value={'id': limit_buy_order_usdt['id']}),
fetch_order=MagicMock(return_value=limit_buy_order_usdt),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
pair = 'BLK/BTC'
# Ensure the pair is not in the whitelist!
assert pair not in default_conf_usdt['exchange']['pair_whitelist']
# create open trade not in whitelist
Trade.session.add(Trade(
pair=pair,
stake_amount=0.001,
fee_open=fee.return_value,
fee_close=fee.return_value,
is_open=True,
amount=20,
open_rate=0.01,
exchange='binance',
))
Trade.session.add(Trade(
pair='ETH/USDT',
stake_amount=0.001,
fee_open=fee.return_value,
fee_close=fee.return_value,
is_open=True,
amount=12,
open_rate=0.001,
exchange='binance',
))
Trade.commit()
assert pair not in freqtrade.active_pair_whitelist
freqtrade.process()
assert pair in freqtrade.active_pair_whitelist
# Make sure each pair is only in the list once
assert len(freqtrade.active_pair_whitelist) == len(set(freqtrade.active_pair_whitelist))
def test_process_informative_pairs_added(default_conf_usdt, ticker_usdt, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
refresh_mock = MagicMock()
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(side_effect=TemporaryError),
refresh_latest_ohlcv=refresh_mock,
)
inf_pairs = MagicMock(return_value=[
("BTC/ETH", '1m', CandleType.SPOT),
("ETH/USDT", "1h", CandleType.SPOT)
])
mocker.patch.multiple(
'freqtrade.strategy.interface.IStrategy',
get_exit_signal=MagicMock(return_value=(False, False)),
get_entry_signal=MagicMock(return_value=(None, None))
)
mocker.patch('time.sleep', return_value=None)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.informative_pairs = inf_pairs
# patch_get_signal(freqtrade)
freqtrade.process()
assert inf_pairs.call_count == 1
assert refresh_mock.call_count == 1
assert ("BTC/ETH", "1m", CandleType.SPOT) in refresh_mock.call_args[0][0]
assert ("ETH/USDT", "1h", CandleType.SPOT) in refresh_mock.call_args[0][0]
assert ("ETH/USDT", default_conf_usdt["timeframe"],
CandleType.SPOT) in refresh_mock.call_args[0][0]
@pytest.mark.parametrize("is_short,trading_mode,exchange_name,margin_mode,liq_buffer,liq_price", [
(False, 'spot', 'binance', None, 0.0, None),
(True, 'spot', 'binance', None, 0.0, None),
(False, 'spot', 'gate', None, 0.0, None),
(True, 'spot', 'gate', None, 0.0, None),
(False, 'spot', 'okx', None, 0.0, None),
(True, 'spot', 'okx', None, 0.0, None),
(True, 'futures', 'binance', 'isolated', 0.0, 11.88151815181518),
(False, 'futures', 'binance', 'isolated', 0.0, 8.080471380471382),
(True, 'futures', 'gate', 'isolated', 0.0, 11.87413417771621),
(False, 'futures', 'gate', 'isolated', 0.0, 8.085708510208207),
(True, 'futures', 'binance', 'isolated', 0.05, 11.7874422442244),
(False, 'futures', 'binance', 'isolated', 0.05, 8.17644781144781),
(True, 'futures', 'gate', 'isolated', 0.05, 11.7804274688304),
(False, 'futures', 'gate', 'isolated', 0.05, 8.181423084697796),
(True, 'futures', 'okx', 'isolated', 0.0, 11.87413417771621),
(False, 'futures', 'okx', 'isolated', 0.0, 8.085708510208207),
(True, 'futures', 'bybit', 'isolated', 0.0, 11.9),
(False, 'futures', 'bybit', 'isolated', 0.0, 8.1),
])
def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
limit_order_open, is_short, trading_mode,
exchange_name, margin_mode, liq_buffer, liq_price) -> None:
"""
exchange_name = binance, is_short = true
leverage = 5
position = 0.2 * 5
((wb + cum_b) - (side_1 * position * ep1)) / ((position * mmr_b) - (side_1 * position))
((2 + 0.01) - ((-1) * 1 * 10)) / ((1 * 0.01) - ((-1) * 1)) = 11.89108910891089
exchange_name = binance, is_short = false
((wb + cum_b) - (side_1 * position * ep1)) / ((position * mmr_b) - (side_1 * position))
((2 + 0.01) - (1 * 1 * 10)) / ((1 * 0.01) - (1 * 1)) = 8.070707070707071
exchange_name = gate/okx, is_short = true
(open_rate + (wallet_balance / position)) / (1 + (mm_ratio + taker_fee_rate))
(10 + (2 / 1)) / (1 + (0.01 + 0.0006)) = 11.87413417771621
exchange_name = gate/okx, is_short = false
(open_rate - (wallet_balance / position)) / (1 - (mm_ratio + taker_fee_rate))
(10 - (2 / 1)) / (1 - (0.01 + 0.0006)) = 8.085708510208207
"""
# TODO: Split this test into multiple tests to improve readability
open_order = limit_order_open[entry_side(is_short)]
order = limit_order[entry_side(is_short)]
default_conf_usdt['trading_mode'] = trading_mode
default_conf_usdt['liquidation_buffer'] = liq_buffer
leverage = 1.0 if trading_mode == 'spot' else 5.0
default_conf_usdt['exchange']['name'] = exchange_name
if margin_mode:
default_conf_usdt['margin_mode'] = margin_mode
mocker.patch('freqtrade.exchange.gate.Gate.validate_ordertypes')
patch_RPCManager(mocker)
patch_exchange(mocker, id=exchange_name)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=False)
freqtrade.strategy.leverage = MagicMock(return_value=leverage)
stake_amount = 2
bid = 0.11
enter_rate_mock = MagicMock(return_value=bid)
enter_mm = MagicMock(return_value=open_order)
mocker.patch.multiple(
EXMS,
get_rate=enter_rate_mock,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=enter_mm,
get_min_pair_stake_amount=MagicMock(return_value=1),
get_max_pair_stake_amount=MagicMock(return_value=500000),
get_fee=fee,
get_funding_fees=MagicMock(return_value=0),
name=exchange_name,
get_maintenance_ratio_and_amt=MagicMock(return_value=(0.01, 0.01)),
get_max_leverage=MagicMock(return_value=10),
)
mocker.patch.multiple(
'freqtrade.exchange.okx.Okx',
get_max_pair_stake_amount=MagicMock(return_value=500000),
)
pair = 'ETH/USDT'
assert not freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
assert enter_rate_mock.call_count == 1
assert enter_mm.call_count == 0
assert freqtrade.strategy.confirm_trade_entry.call_count == 1
enter_rate_mock.reset_mock()
open_order['id'] = '22'
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True)
assert freqtrade.execute_entry(pair, stake_amount)
assert enter_rate_mock.call_count == 1
assert enter_mm.call_count == 1
call_args = enter_mm.call_args_list[0][1]
assert call_args['pair'] == pair
assert call_args['rate'] == bid
assert pytest.approx(call_args['amount']) == round(stake_amount / bid * leverage, 8)
enter_rate_mock.reset_mock()
# Should create an open trade with an open order id
# As the order is not fulfilled yet
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
assert trade.is_open is True
assert trade.has_open_orders
assert '22' in trade.open_orders_ids
# Test calling with price
open_order['id'] = '33'
fix_price = 0.06
assert freqtrade.execute_entry(pair, stake_amount, fix_price, is_short=is_short)
# Make sure get_rate wasn't called again
assert enter_rate_mock.call_count == 0
assert enter_mm.call_count == 2
call_args = enter_mm.call_args_list[1][1]
assert call_args['pair'] == pair
assert call_args['rate'] == fix_price
assert pytest.approx(call_args['amount']) == round(stake_amount / fix_price * leverage, 8)
# In case of closed order
order['status'] = 'closed'
order['average'] = 10
order['cost'] = 300
order['id'] = '444'
mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order))
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[2]
trade.is_short = is_short
assert trade
assert not trade.has_open_orders
assert trade.open_rate == 10
assert trade.stake_amount == round(order['average'] * order['filled'] / leverage, 8)
assert pytest.approx(trade.liquidation_price) == liq_price
# In case of rejected or expired order and partially filled
order['status'] = 'expired'
order['amount'] = 30.0
order['filled'] = 20.0
order['remaining'] = 10.00
order['average'] = 0.5
order['cost'] = 10.0
order['id'] = '555'
mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order))
assert freqtrade.execute_entry(pair, stake_amount)
trade = Trade.session.scalars(select(Trade)).all()[3]
trade.is_short = is_short
assert trade
assert not trade.has_open_orders
assert trade.open_rate == 0.5
assert trade.stake_amount == round(order['average'] * order['filled'] / leverage, 8)
# Test with custom stake
order['status'] = 'open'
order['id'] = '556'
freqtrade.strategy.custom_stake_amount = lambda **kwargs: 150.0
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[4]
trade.is_short = is_short
assert trade
assert pytest.approx(trade.stake_amount) == 150
# Exception case
order['id'] = '557'
freqtrade.strategy.custom_stake_amount = lambda **kwargs: 20 / 0
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[5]
trade.is_short = is_short
assert trade
assert pytest.approx(trade.stake_amount) == 2.0
# In case of the order is rejected and not filled at all
order['status'] = 'rejected'
order['amount'] = 30.0 * leverage
order['filled'] = 0.0
order['remaining'] = 30.0
order['average'] = 0.5
order['cost'] = 0.0
order['id'] = '66'
mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order))
assert not freqtrade.execute_entry(pair, stake_amount)
assert freqtrade.strategy.leverage.call_count == 0 if trading_mode == 'spot' else 2
# Fail to get price...
mocker.patch(f'{EXMS}.get_rate', MagicMock(return_value=0.0))
with pytest.raises(PricingError, match="Could not determine entry price."):
freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
# In case of custom entry price
mocker.patch(f'{EXMS}.get_rate', return_value=0.50)
order['status'] = 'open'
order['id'] = '5566'
freqtrade.strategy.custom_entry_price = lambda **kwargs: 0.508
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[6]
trade.is_short = is_short
assert trade
assert trade.open_rate_requested == 0.508
# In case of custom entry price set to None
order['status'] = 'open'
order['id'] = '5567'
freqtrade.strategy.custom_entry_price = lambda **kwargs: None
mocker.patch.multiple(
EXMS,
get_rate=MagicMock(return_value=10),
)
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[7]
trade.is_short = is_short
assert trade
assert trade.open_rate_requested == 10
# In case of custom entry price not float type
order['status'] = 'open'
order['id'] = '5568'
freqtrade.strategy.custom_entry_price = lambda **kwargs: "string price"
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[8]
# Trade(id=9, pair=ETH/USDT, amount=0.20000000, is_short=False,
# leverage=1.0, open_rate=10.00000000, open_since=...)
# Trade(id=9, pair=ETH/USDT, amount=0.60000000, is_short=True,
# leverage=3.0, open_rate=10.00000000, open_since=...)
trade.is_short = is_short
assert trade
assert trade.open_rate_requested == 10
# In case of too high stake amount
order['status'] = 'open'
order['id'] = '55672'
mocker.patch.multiple(
EXMS,
get_max_pair_stake_amount=MagicMock(return_value=500),
)
freqtrade.exchange.get_max_pair_stake_amount = MagicMock(return_value=500)
assert freqtrade.execute_entry(pair, 2000, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[9]
trade.is_short = is_short
assert pytest.approx(trade.stake_amount) == 500
order['id'] = '55673'
freqtrade.strategy.leverage.reset_mock()
assert freqtrade.execute_entry(pair, 200, leverage_=3)
assert freqtrade.strategy.leverage.call_count == 0
trade = Trade.session.scalars(select(Trade)).all()[10]
assert trade.leverage == 1 if trading_mode == 'spot' else 3
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_entry_confirm_error(mocker, default_conf_usdt, fee, limit_order, is_short) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
get_rate=MagicMock(return_value=0.11),
get_min_pair_stake_amount=MagicMock(return_value=1),
get_fee=fee,
)
stake_amount = 2
pair = 'ETH/USDT'
freqtrade.strategy.confirm_trade_entry = MagicMock(side_effect=ValueError)
assert freqtrade.execute_entry(pair, stake_amount)
limit_order[entry_side(is_short)]['id'] = '222'
freqtrade.strategy.confirm_trade_entry = MagicMock(side_effect=Exception)
assert freqtrade.execute_entry(pair, stake_amount)
limit_order[entry_side(is_short)]['id'] = '2223'
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True)
assert freqtrade.execute_entry(pair, stake_amount)
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=False)
assert not freqtrade.execute_entry(pair, stake_amount)
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_entry_min_leverage(mocker, default_conf_usdt, fee, limit_order, is_short) -> None:
default_conf_usdt['trading_mode'] = 'futures'
default_conf_usdt['margin_mode'] = 'isolated'
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
get_rate=MagicMock(return_value=0.11),
# Minimum stake-amount is ~5$
get_maintenance_ratio_and_amt=MagicMock(return_value=(0.0, 0.0)),
_fetch_and_calculate_funding_fees=MagicMock(return_value=0),
get_fee=fee,
get_max_leverage=MagicMock(return_value=5.0),
)
stake_amount = 2
pair = 'SOL/BUSD:BUSD'
freqtrade.strategy.leverage = MagicMock(return_value=5.0)
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).first()
assert trade.leverage == 5.0
# assert trade.stake_amount == 2
@pytest.mark.parametrize("is_short", [False, True])
def test_add_stoploss_on_exchange(mocker, default_conf_usdt, limit_order, is_short, fee) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
get_fee=fee,
)
order = limit_order[entry_side(is_short)]
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True))
mocker.patch(f'{EXMS}.fetch_order', return_value=order)
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
stoploss = MagicMock(return_value={'id': 13434334})
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.stoploss_order_id = None
trade.is_open = True
trades = [trade]
freqtrade.exit_positions(trades)
assert trade.stoploss_order_id == '13434334'
assert stoploss.call_count == 1
assert trade.is_open is True
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_short,
limit_order) -> None:
stop_order_dict = {'id': "13434334"}
stoploss = MagicMock(return_value=stop_order_dict)
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
enter_order,
exit_order,
]),
get_fee=fee,
create_stoploss=stoploss
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# First case: when stoploss is not yet set but the order is open
# should get the stoploss order id immediately
# and should return false as no trade actually happened
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = None
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert stoploss.call_count == 1
assert trade.stoploss_order_id == "13434334"
# Second case: when stoploss is set but it is not yet hit
# should do nothing and return false
stop_order_dict.update({'id': "102"})
trade.is_open = True
trade.stoploss_order_id = "102"
trade.orders.append(
Order(
ft_order_side='stoploss',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=trade.stop_loss,
order_id='102',
status='open',
)
)
hanging_stoploss_order = MagicMock(return_value={'status': 'open'})
mocker.patch(f'{EXMS}.fetch_stoploss_order', hanging_stoploss_order)
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert trade.stoploss_order_id == "102"
# Third case: when stoploss was set but it was canceled for some reason
# should set a stoploss immediately and return False
caplog.clear()
trade.is_open = True
trade.stoploss_order_id = "102"
canceled_stoploss_order = MagicMock(return_value={'id': '103_1', 'status': 'canceled'})
mocker.patch(f'{EXMS}.fetch_stoploss_order', canceled_stoploss_order)
stoploss.reset_mock()
amount_before = trade.amount
stop_order_dict.update({'id': "103_1"})
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert stoploss.call_count == 1
assert trade.stoploss_order_id == "103_1"
assert trade.amount == amount_before
# Fourth case: when stoploss is set and it is hit
# should unset stoploss_order_id and return true
# as a trade actually happened
caplog.clear()
freqtrade.enter_positions()
stop_order_dict.update({'id': "104"})
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = "104"
trade.orders.append(Order(
ft_order_side='stoploss',
order_id='104',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=0.0,
))
assert trade
stoploss_order_hit = MagicMock(return_value={
'id': "104",
'status': 'closed',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'filled': enter_order['amount'],
'remaining': 0,
'amount': enter_order['amount'],
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit)
assert freqtrade.handle_stoploss_on_exchange(trade) is True
assert log_has_re(r'STOP_LOSS_LIMIT is hit for Trade\(id=1, .*\)\.', caplog)
assert trade.stoploss_order_id is None
assert trade.is_open is False
caplog.clear()
mocker.patch(f'{EXMS}.create_stoploss', side_effect=ExchangeError())
trade.is_open = True
freqtrade.handle_stoploss_on_exchange(trade)
assert log_has('Unable to place a stoploss order on exchange.', caplog)
assert trade.stoploss_order_id is None
# Fifth case: fetch_order returns InvalidOrder
# It should try to add stoploss order
stop_order_dict.update({'id': "105"})
trade.stoploss_order_id = "105"
stoploss.reset_mock()
mocker.patch(f'{EXMS}.fetch_stoploss_order', side_effect=InvalidOrderException())
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
freqtrade.handle_stoploss_on_exchange(trade)
assert stoploss.call_count == 1
# Sixth case: Closed Trade
# Should not create new order
trade.stoploss_order_id = None
trade.is_open = False
stoploss.reset_mock()
mocker.patch(f'{EXMS}.fetch_order')
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert stoploss.call_count == 0
# Seventh case: emergency exit triggered
# Trailing stop should not act anymore
stoploss_order_cancelled = MagicMock(side_effect=[{
'id': "107",
'status': 'canceled',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'amount': enter_order['amount'],
'filled': 0,
'remaining': enter_order['amount'],
'info': {'stopPrice': 22},
}])
trade.stoploss_order_id = "107"
trade.is_open = True
trade.stoploss_last_update = dt_now() - timedelta(hours=1)
trade.stop_loss = 24
trade.exit_reason = None
trade.orders.append(
Order(
ft_order_side='stoploss',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=trade.stop_loss,
order_id='107',
status='open',
)
)
freqtrade.config['trailing_stop'] = True
stoploss = MagicMock(side_effect=InvalidOrderException())
Trade.commit()
mocker.patch(f'{EXMS}.cancel_stoploss_order_with_result',
side_effect=InvalidOrderException())
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_cancelled)
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert trade.stoploss_order_id is None
assert trade.is_open is False
assert trade.exit_reason == str(ExitType.EMERGENCY_EXIT)
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_stoploss_on_exchange_partial(
mocker, default_conf_usdt, fee, is_short, limit_order) -> None:
stop_order_dict = {'id': "101", "status": "open"}
stoploss = MagicMock(return_value=stop_order_dict)
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
enter_order,
exit_order,
]),
get_fee=fee,
create_stoploss=stoploss
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = None
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert stoploss.call_count == 1
assert trade.stoploss_order_id == "101"
assert trade.amount == 30
stop_order_dict.update({'id': "102"})
# Stoploss on exchange is cancelled on exchange, but filled partially.
# Must update trade amount to guarantee successful exit.
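# The mocked fetch below reports filled = trade.amount / 2, so the bot is expected
# to reduce trade.amount from 30 to 15 to match what is actually still held.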
stoploss_order_hit = MagicMock(return_value={
'id': "101",
'status': 'canceled',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'filled': trade.amount / 2,
'remaining': trade.amount / 2,
'amount': enter_order['amount'],
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit)
assert freqtrade.handle_stoploss_on_exchange(trade) is False
# Stoploss filled partially ...
assert trade.amount == 15
assert trade.stoploss_order_id == "102"
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_stoploss_on_exchange_partial_cancel_here(
mocker, default_conf_usdt, fee, is_short, limit_order, caplog) -> None:
stop_order_dict = {'id': "101", "status": "open"}
default_conf_usdt['trailing_stop'] = True
stoploss = MagicMock(return_value=stop_order_dict)
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
enter_order,
exit_order,
]),
get_fee=fee,
create_stoploss=stoploss
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = None
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert stoploss.call_count == 1
assert trade.stoploss_order_id == "101"
assert trade.amount == 30
stop_order_dict.update({'id': "102"})
# Stoploss on exchange is open.
# Freqtrade cancels the stop - but cancel returns a partial filled order.
stoploss_order_hit = MagicMock(return_value={
'id': "101",
'status': 'open',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'filled': 0,
'remaining': trade.amount,
'amount': enter_order['amount'],
})
stoploss_order_cancel = MagicMock(return_value={
'id': "101",
'status': 'canceled',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'filled': trade.amount / 2,
'remaining': trade.amount / 2,
'amount': enter_order['amount'],
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit)
mocker.patch(f'{EXMS}.cancel_stoploss_order_with_result', stoploss_order_cancel)
trade.stoploss_last_update = dt_now() - timedelta(minutes=10)
assert freqtrade.handle_stoploss_on_exchange(trade) is False
# Canceled Stoploss filled partially ...
assert log_has_re('Cancelling current stoploss on exchange.*', caplog)
assert trade.stoploss_order_id == "102"
assert trade.amount == 15
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_sle_cancel_cant_recreate(mocker, default_conf_usdt, fee, caplog, is_short,
limit_order) -> None:
# Sixth case: stoploss order was cancelled but couldn't create new one
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
enter_order,
exit_order,
]),
get_fee=fee,
)
mocker.patch.multiple(
EXMS,
fetch_stoploss_order=MagicMock(return_value={'status': 'canceled', 'id': 100}),
create_stoploss=MagicMock(side_effect=ExchangeError()),
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
trade.is_open = True
trade.stoploss_order_id = "100"
trade.orders.append(
Order(
ft_order_side='stoploss',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=trade.stop_loss,
order_id='100',
status='open',
)
)
assert trade
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert log_has_re(r'Stoploss order was cancelled, but unable to recreate one.*', caplog)
assert trade.stoploss_order_id is None
assert trade.is_open is True
@pytest.mark.parametrize("is_short", [False, True])
def test_create_stoploss_order_invalid_order(
mocker, default_conf_usdt, caplog, fee, is_short, limit_order
):
open_order = limit_order[entry_side(is_short)]
order = limit_order[exit_side(is_short)]
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
create_order_mock = MagicMock(side_effect=[
open_order,
order,
])
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=create_order_mock,
get_fee=fee,
)
mocker.patch.multiple(
EXMS,
fetch_order=MagicMock(return_value={'status': 'canceled'}),
create_stoploss=MagicMock(side_effect=InvalidOrderException()),
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
caplog.clear()
rpc_mock.reset_mock()
freqtrade.create_stoploss_order(trade, 200)
assert trade.stoploss_order_id is None
assert trade.exit_reason == ExitType.EMERGENCY_EXIT.value
assert log_has("Unable to place a stoploss order on exchange. ", caplog)
assert log_has("Exiting the trade forcefully", caplog)
# Should call a market sell
assert create_order_mock.call_count == 2
assert create_order_mock.call_args[1]['ordertype'] == 'market'
assert create_order_mock.call_args[1]['pair'] == trade.pair
assert create_order_mock.call_args[1]['amount'] == trade.amount
# RPC sends the exit message first, then the exit fill
assert rpc_mock.call_count == 2
assert rpc_mock.call_args_list[0][0][0]['sell_reason'] == ExitType.EMERGENCY_EXIT.value
assert rpc_mock.call_args_list[0][0][0]['order_type'] == 'market'
assert rpc_mock.call_args_list[0][0][0]['type'] == 'exit'
assert rpc_mock.call_args_list[1][0][0]['type'] == 'exit_fill'
@pytest.mark.parametrize("is_short", [False, True])
def test_create_stoploss_order_insufficient_funds(
mocker, default_conf_usdt, caplog, fee, limit_order, is_short
):
exit_order = limit_order[exit_side(is_short)]['id']
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mock_insuf = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_insufficient_funds')
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
limit_order[entry_side(is_short)],
exit_order,
]),
get_fee=fee,
fetch_order=MagicMock(return_value={'status': 'canceled'}),
)
mocker.patch.multiple(
EXMS,
create_stoploss=MagicMock(side_effect=InsufficientFundsError()),
)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
caplog.clear()
freqtrade.create_stoploss_order(trade, 200)
# stoploss_orderid was empty before
assert trade.stoploss_order_id is None
assert mock_insuf.call_count == 1
mock_insuf.reset_mock()
trade.stoploss_order_id = 'stoploss_orderid'
freqtrade.create_stoploss_order(trade, 200)
# No change to stoploss-orderid
assert trade.stoploss_order_id == 'stoploss_orderid'
assert mock_insuf.call_count == 1
@pytest.mark.parametrize("is_short,bid,ask,stop_price,hang_price", [
(False, [4.38, 4.16], [4.4, 4.17], ['2.0805', 4.4 * 0.95], 3),
(True, [1.09, 1.21], [1.1, 1.22], ['2.321', 1.09 * 1.05], 1.5),
])
@pytest.mark.usefixtures("init_persistence")
def test_handle_stoploss_on_exchange_trailing(
mocker, default_conf_usdt, fee, is_short, bid, ask, limit_order, stop_price, hang_price
) -> None:
# When trailing stoploss is set
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
stoploss = MagicMock(return_value={'id': 13434334, 'status': 'open'})
patch_RPCManager(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 2.19,
'ask': 2.2,
'last': 2.19,
}),
create_order=MagicMock(side_effect=[
enter_order,
exit_order,
]),
get_fee=fee,
)
mocker.patch.multiple(
EXMS,
create_stoploss=stoploss,
stoploss_adjust=MagicMock(return_value=True),
)
# enabling TSL
default_conf_usdt['trailing_stop'] = True
# disabling ROI
default_conf_usdt['minimal_roi']['0'] = 999999999
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
# enabling stoploss on exchange
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
# setting stoploss
freqtrade.strategy.stoploss = 0.05 if is_short else -0.05
# setting stoploss_on_exchange_interval to 60 seconds
freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = '100'
trade.stoploss_last_update = dt_now() - timedelta(minutes=20)
trade.orders.append(
Order(
ft_order_side='stoploss',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=trade.stop_loss,
order_id='100',
)
)
stoploss_order_hanging = MagicMock(return_value={
'id': '100',
'status': 'open',
'type': 'stop_loss_limit',
'price': hang_price,
'average': 2,
'info': {
'stopPrice': stop_price[0]
}
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hanging)
# stoploss initially at 5%
assert freqtrade.handle_trade(trade) is False
assert freqtrade.handle_stoploss_on_exchange(trade) is False
# price jumped 2x
mocker.patch(
f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': bid[0],
'ask': ask[0],
'last': bid[0],
})
)
cancel_order_mock = MagicMock()
stoploss_order_mock = MagicMock(return_value={'id': 'so1', 'status': 'open'})
mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock)
mocker.patch(f'{EXMS}.create_stoploss', stoploss_order_mock)
# stoploss should not be updated as the interval is 60 seconds
assert freqtrade.handle_trade(trade) is False
assert freqtrade.handle_stoploss_on_exchange(trade) is False
cancel_order_mock.assert_not_called()
stoploss_order_mock.assert_not_called()
assert freqtrade.handle_trade(trade) is False
assert trade.stop_loss == stop_price[1]
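# The trailing stop follows the new price at the 5% offset configured above:
# stop_price[1] is 4.4 * 0.95 for the long case and 1.09 * 1.05 for the short case.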
trade.stoploss_order_id = '100'
# setting stoploss_on_exchange_interval to 0 seconds
freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 0
assert freqtrade.handle_stoploss_on_exchange(trade) is False
cancel_order_mock.assert_called_once_with('100', 'ETH/USDT')
stoploss_order_mock.assert_called_once_with(
amount=30,
pair='ETH/USDT',
order_types=freqtrade.strategy.order_types,
stop_price=stop_price[1],
side=exit_side(is_short),
leverage=1.0
)
# price fell below stoploss, so dry-run sells trade.
mocker.patch(
f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': bid[1],
'ask': ask[1],
'last': bid[1],
})
)
assert freqtrade.handle_trade(trade) is True
assert trade.stoploss_order_id is None
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_stoploss_on_exchange_trailing_error(
mocker, default_conf_usdt, fee, caplog, limit_order, is_short
) -> None:
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
# When trailing stoploss is set
stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'})
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
{'id': enter_order['id']},
{'id': exit_order['id']},
]),
get_fee=fee,
)
mocker.patch.multiple(
EXMS,
create_stoploss=stoploss,
stoploss_adjust=MagicMock(return_value=True),
)
# enabling TSL
default_conf_usdt['trailing_stop'] = True
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
# enabling stoploss on exchange
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
# setting stoploss
freqtrade.strategy.stoploss = 0.05 if is_short else -0.05
# setting stoploss_on_exchange_interval to 60 seconds
freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = "abcd"
trade.stop_loss = 0.2
trade.stoploss_last_update = (dt_now() - timedelta(minutes=601)).replace(tzinfo=None)
trade.is_short = is_short
stoploss_order_hanging = {
'id': "abcd",
'status': 'open',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'info': {
'stopPrice': '0.1'
}
}
mocker.patch(f'{EXMS}.cancel_stoploss_order',
side_effect=InvalidOrderException())
mocker.patch(f'{EXMS}.fetch_stoploss_order',
return_value=stoploss_order_hanging)
freqtrade.handle_trailing_stoploss_on_exchange(trade, stoploss_order_hanging)
assert log_has_re(r"Could not cancel stoploss order abcd for pair ETH/USDT.*", caplog)
# Still try to create order
assert stoploss.call_count == 1
# Fail creating stoploss order
trade.stoploss_last_update = dt_now() - timedelta(minutes=601)
caplog.clear()
cancel_mock = mocker.patch(f'{EXMS}.cancel_stoploss_order')
mocker.patch(f'{EXMS}.create_stoploss', side_effect=ExchangeError())
freqtrade.handle_trailing_stoploss_on_exchange(trade, stoploss_order_hanging)
assert cancel_mock.call_count == 1
assert log_has_re(r"Could not create trailing stoploss order for pair ETH/USDT\..*", caplog)
def test_stoploss_on_exchange_price_rounding(
mocker, default_conf_usdt, fee, open_trade_usdt) -> None:
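    # The stop price must go through price_to_precision (mocked here to truncate to
    # an int) before it is handed to stoploss_adjust - hence 222.55 arrives as 222.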
patch_RPCManager(mocker)
mocker.patch.multiple(
EXMS,
get_fee=fee,
)
price_mock = MagicMock(side_effect=lambda p, s, **kwargs: int(s))
stoploss_mock = MagicMock(return_value={'id': '13434334'})
adjust_mock = MagicMock(return_value=False)
mocker.patch.multiple(
EXMS,
create_stoploss=stoploss_mock,
stoploss_adjust=adjust_mock,
price_to_precision=price_mock,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
open_trade_usdt.stoploss_order_id = '13434334'
open_trade_usdt.stop_loss = 222.55
freqtrade.handle_trailing_stoploss_on_exchange(open_trade_usdt, {})
assert price_mock.call_count == 1
assert adjust_mock.call_count == 1
assert adjust_mock.call_args_list[0][0][0] == 222
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.usefixtures("init_persistence")
def test_handle_stoploss_on_exchange_custom_stop(
mocker, default_conf_usdt, fee, is_short, limit_order
) -> None:
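    # Trailing stop driven by a custom_stoploss callback returning -4%; the exchange
    # stoploss order is only replaced once the update interval allows it.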
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
# When trailing stoploss is set
stoploss = MagicMock(return_value={'id': 13434334, 'status': 'open'})
patch_RPCManager(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
enter_order,
exit_order,
]),
get_fee=fee,
)
mocker.patch.multiple(
EXMS,
create_stoploss=stoploss,
stoploss_adjust=MagicMock(return_value=True),
)
# enabling TSL
default_conf_usdt['use_custom_stoploss'] = True
# disabling ROI
default_conf_usdt['minimal_roi']['0'] = 999999999
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
# enabling stoploss on exchange
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
# setting stoploss
freqtrade.strategy.custom_stoploss = lambda *args, **kwargs: -0.04
# setting stoploss_on_exchange_interval to 60 seconds
freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = '100'
trade.stoploss_last_update = dt_now() - timedelta(minutes=601)
trade.orders.append(
Order(
ft_order_side='stoploss',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=trade.stop_loss,
order_id='100',
)
)
stoploss_order_hanging = MagicMock(return_value={
'id': '100',
'status': 'open',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'info': {
'stopPrice': '2.0805'
}
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hanging)
assert freqtrade.handle_trade(trade) is False
assert freqtrade.handle_stoploss_on_exchange(trade) is False
# price jumped 2x
mocker.patch(
f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': 4.38 if not is_short else 1.9 / 2,
'ask': 4.4 if not is_short else 2.2 / 2,
'last': 4.38 if not is_short else 1.9 / 2,
})
)
cancel_order_mock = MagicMock()
stoploss_order_mock = MagicMock(return_value={'id': 'so1', 'status': 'open'})
mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock)
mocker.patch(f'{EXMS}.create_stoploss', stoploss_order_mock)
trade.stoploss_order_id = '100'
# stoploss should not be updated as the interval is 60 seconds
assert freqtrade.handle_trade(trade) is False
assert freqtrade.handle_stoploss_on_exchange(trade) is False
cancel_order_mock.assert_not_called()
stoploss_order_mock.assert_not_called()
assert freqtrade.handle_trade(trade) is False
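    # Note: without parentheses the conditional below binds to the whole assert
    # expression, so for the short case the two assertions are effectively always truthy.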
assert trade.stop_loss == 4.4 * 0.96 if not is_short else 1.1
assert trade.stop_loss_pct == -0.04 if not is_short else 0.04
# setting stoploss_on_exchange_interval to 0 seconds
freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 0
assert freqtrade.handle_stoploss_on_exchange(trade) is False
cancel_order_mock.assert_called_once_with('100', 'ETH/USDT')
    # Long uses the modified ask minus the stoploss offset, short the modified bid plus the offset
stoploss_order_mock.assert_called_once_with(
amount=pytest.approx(trade.amount),
pair='ETH/USDT',
order_types=freqtrade.strategy.order_types,
stop_price=4.4 * 0.96 if not is_short else 0.95 * 1.04,
side=exit_side(is_short),
leverage=1.0
)
# price fell below stoploss, so dry-run sells trade.
mocker.patch(
f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': 4.17,
'ask': 4.19,
'last': 4.17
})
)
assert freqtrade.handle_trade(trade) is True
def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_order) -> None:
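    # The exchange-side trailing stop must respect the wider stoploss dictated by Edge
    # (roughly 20% below the open rate) and only trail at 1% once price has risen
    # beyond the trailing offset.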
enter_order = limit_order['buy']
exit_order = limit_order['sell']
enter_order['average'] = 2.19
# When trailing stoploss is set
stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'})
patch_RPCManager(mocker)
patch_exchange(mocker)
patch_edge(mocker)
edge_conf['max_open_trades'] = float('inf')
edge_conf['dry_run_wallet'] = 999.9
edge_conf['exchange']['name'] = 'binance'
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 2.19,
'ask': 2.2,
'last': 2.19
}),
create_order=MagicMock(side_effect=[
enter_order,
exit_order,
]),
get_fee=fee,
create_stoploss=stoploss,
)
# enabling TSL
edge_conf['trailing_stop'] = True
edge_conf['trailing_stop_positive'] = 0.01
edge_conf['trailing_stop_positive_offset'] = 0.011
# disabling ROI
edge_conf['minimal_roi']['0'] = 999999999
freqtrade = FreqtradeBot(edge_conf)
# enabling stoploss on exchange
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
# setting stoploss
freqtrade.strategy.stoploss = -0.02
# setting stoploss_on_exchange_interval to 0 seconds
freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 0
patch_get_signal(freqtrade)
freqtrade.active_pair_whitelist = freqtrade.edge.adjust(freqtrade.active_pair_whitelist)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_open = True
trade.stoploss_order_id = '100'
trade.stoploss_last_update = dt_now()
trade.orders.append(
Order(
ft_order_side='stoploss',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=trade.stop_loss,
order_id='100',
)
)
stoploss_order_hanging = MagicMock(return_value={
'id': '100',
'status': 'open',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'stopPrice': '2.178'
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hanging)
    # stoploss initially at 20%, as dictated by Edge.
assert freqtrade.handle_trade(trade) is False
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert pytest.approx(trade.stop_loss) == 1.76
cancel_order_mock = MagicMock()
stoploss_order_mock = MagicMock()
mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock)
mocker.patch(f'{EXMS}.create_stoploss', stoploss_order_mock)
# price goes down 5%
mocker.patch(f'{EXMS}.fetch_ticker', MagicMock(return_value={
'bid': 2.19 * 0.95,
'ask': 2.2 * 0.95,
'last': 2.19 * 0.95
}))
assert freqtrade.handle_trade(trade) is False
assert freqtrade.handle_stoploss_on_exchange(trade) is False
# stoploss should remain the same
assert pytest.approx(trade.stop_loss) == 1.76
# stoploss on exchange should not be canceled
cancel_order_mock.assert_not_called()
# price jumped 2x
mocker.patch(f'{EXMS}.fetch_ticker', MagicMock(return_value={
'bid': 4.38,
'ask': 4.4,
'last': 4.38
}))
assert freqtrade.handle_trade(trade) is False
assert freqtrade.handle_stoploss_on_exchange(trade) is False
# stoploss should be set to 1% as trailing is on
assert trade.stop_loss == 4.4 * 0.99
cancel_order_mock.assert_called_once_with('100', 'NEO/BTC')
stoploss_order_mock.assert_called_once_with(
amount=30,
pair='NEO/BTC',
order_types=freqtrade.strategy.order_types,
stop_price=4.4 * 0.99,
side='sell',
leverage=1.0
)
@pytest.mark.parametrize('return_value,side_effect,log_message', [
(False, None, 'Found no enter signals for whitelisted currencies. Trying again...'),
(None, DependencyException, 'Unable to create trade for ETH/USDT: ')
])
def test_enter_positions(mocker, default_conf_usdt, return_value, side_effect,
log_message, caplog) -> None:
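    # Both a falsy return value and a DependencyException from create_trade must result
    # in zero opened positions while emitting the expected log message.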
caplog.set_level(logging.DEBUG)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mock_ct = mocker.patch(
'freqtrade.freqtradebot.FreqtradeBot.create_trade',
MagicMock(
return_value=return_value,
side_effect=side_effect
)
)
n = freqtrade.enter_positions()
assert n == 0
assert log_has(log_message, caplog)
# create_trade should be called once for every pair in the whitelist.
assert mock_ct.call_count == len(default_conf_usdt['exchange']['pair_whitelist'])
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_exit_positions(mocker, default_conf_usdt, limit_order, is_short, caplog) -> None:
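    # Nothing is exited here, so exit_positions() returns 0 and the fee-adjustment
    # (get_real_amount) path is never triggered for this trade.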
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True))
mocker.patch(f'{EXMS}.fetch_order', return_value=limit_order[entry_side(is_short)])
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
order_id = '123'
trade = Trade(
pair='ETH/USDT',
fee_open=0.001,
fee_close=0.001,
open_rate=0.01,
open_date=dt_now(),
stake_amount=0.01,
amount=11,
exchange="binance",
is_short=is_short,
leverage=1,
)
trade.orders.append(Order(
ft_order_side=entry_side(is_short),
price=0.01,
ft_pair=trade.pair,
ft_amount=trade.amount,
ft_price=trade.open_rate,
order_id=order_id,
))
Trade.session.add(trade)
Trade.commit()
trades = [trade]
freqtrade.wallets.update()
n = freqtrade.exit_positions(trades)
assert n == 0
# Test amount not modified by fee-logic
assert not log_has_re(r'Applying fee to amount for Trade .*', caplog)
gra = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.0)
# test amount modified by fee-logic
n = freqtrade.exit_positions(trades)
assert n == 0
assert gra.call_count == 0
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_exit_positions_exception(mocker, default_conf_usdt, limit_order, caplog, is_short) -> None:
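    # A DependencyException raised by handle_trade is caught per trade and logged,
    # so exit_positions() returns 0 instead of propagating the error.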
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
order = limit_order[entry_side(is_short)]
mocker.patch(f'{EXMS}.fetch_order', return_value=order)
order_id = '123'
trade = Trade(
pair='ETH/USDT',
fee_open=0.001,
fee_close=0.001,
open_rate=0.01,
open_date=dt_now(),
stake_amount=0.01,
amount=11,
exchange="binance",
is_short=is_short,
leverage=1,
)
trade.orders.append(Order(
ft_order_side=entry_side(is_short),
price=0.01,
ft_pair=trade.pair,
ft_amount=trade.amount,
ft_price=trade.open_rate,
order_id=order_id,
ft_is_open=False,
))
Trade.session.add(trade)
Trade.commit()
freqtrade.wallets.update()
trades = [trade]
# Test raise of DependencyException exception
mocker.patch(
'freqtrade.freqtradebot.FreqtradeBot.handle_trade',
side_effect=DependencyException()
)
caplog.clear()
n = freqtrade.exit_positions(trades)
assert n == 0
assert log_has('Unable to exit trade ETH/USDT: ', caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trade_state(mocker, default_conf_usdt, limit_order, is_short, caplog) -> None:
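    # update_trade_state: an empty order id is rejected, a filled order clears the
    # open-order state, the fee logic may shrink the amount, and a cancelled empty
    # order makes the call return True.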
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
order = limit_order[entry_side(is_short)]
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True))
mocker.patch(f'{EXMS}.fetch_order', return_value=order)
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.0)
order_id = order['id']
trade = Trade(
fee_open=0.001,
fee_close=0.001,
open_rate=0.01,
open_date=dt_now(),
amount=11,
exchange="binance",
is_short=is_short,
leverage=1,
)
trade.orders.append(Order(
ft_order_side=entry_side(is_short),
price=0.01,
order_id=order_id,
))
assert not freqtrade.update_trade_state(trade, None)
assert log_has_re(r'Orderid for trade .* is empty.', caplog)
caplog.clear()
# Add datetime explicitly since sqlalchemy defaults apply only once written to database
freqtrade.update_trade_state(trade, order_id)
# Test amount not modified by fee-logic
assert not log_has_re(r'Applying fee to .*', caplog)
caplog.clear()
assert not trade.has_open_orders
assert trade.amount == order['amount']
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.01)
assert trade.amount == 30.0
# test amount modified by fee-logic
freqtrade.update_trade_state(trade, order_id)
assert trade.amount == 29.99
assert not trade.has_open_orders
trade.is_open = True
# Assert we call handle_trade() if trade is feasible for execution
freqtrade.update_trade_state(trade, order_id)
assert log_has_re('Found open order for.*', caplog)
limit_buy_order_usdt_new = deepcopy(limit_order)
limit_buy_order_usdt_new['filled'] = 0.0
limit_buy_order_usdt_new['status'] = 'canceled'
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', side_effect=ValueError)
mocker.patch(f'{EXMS}.fetch_order', return_value=limit_buy_order_usdt_new)
res = freqtrade.update_trade_state(trade, order_id)
# Cancelled empty
assert res is True
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize('initial_amount,has_rounding_fee', [
(30.0 + 1e-14, True),
(8.0, False)
])
def test_update_trade_state_withorderdict(
default_conf_usdt, trades_for_order, limit_order, fee, mocker, initial_amount,
has_rounding_fee, is_short, caplog
):
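    # When the order dict is passed in directly, fetch_order must not be called; the
    # trade amount is updated from the order and only reduced when a rounding fee was
    # actually charged on the bought amount.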
order = limit_order[entry_side(is_short)]
trades_for_order[0]['amount'] = initial_amount
order_id = "oid_123456"
order['id'] = order_id
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
# fetch_order should not be called!!
mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=ValueError))
patch_exchange(mocker)
amount = sum(x['amount'] for x in trades_for_order)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
caplog.clear()
trade = Trade(
pair='LTC/USDT',
amount=amount,
exchange='binance',
open_rate=2.0,
open_date=dt_now(),
fee_open=fee.return_value,
fee_close=fee.return_value,
is_open=True,
leverage=1,
is_short=is_short,
)
trade.orders.append(
Order(
ft_order_side=entry_side(is_short),
ft_pair=trade.pair,
ft_is_open=True,
order_id=order_id,
)
)
log_text = r'Applying fee on amount for .*'
freqtrade.update_trade_state(trade, order_id, order)
assert trade.amount != amount
if has_rounding_fee:
assert pytest.approx(trade.amount) == 29.992
assert log_has_re(log_text, caplog)
else:
assert pytest.approx(trade.amount) == order['amount']
assert not log_has_re(log_text, caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trade_state_exception(mocker, default_conf_usdt, is_short, limit_order,
caplog) -> None:
order = limit_order[entry_side(is_short)]
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch(f'{EXMS}.fetch_order', return_value=order)
# TODO: should not be magicmock
trade = MagicMock()
trade.amount = 123
open_order_id = '123'
# Test raise of OperationalException exception
mocker.patch(
'freqtrade.freqtradebot.FreqtradeBot.get_real_amount',
side_effect=DependencyException()
)
freqtrade.update_trade_state(trade, open_order_id)
assert log_has('Could not update trade amount: ', caplog)
def test_update_trade_state_orderexception(mocker, default_conf_usdt, caplog) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=InvalidOrderException))
# TODO: should not be magicmock
trade = MagicMock()
open_order_id = '123'
# Test raise of OperationalException exception
grm_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.get_real_amount", MagicMock())
freqtrade.update_trade_state(trade, open_order_id)
assert grm_mock.call_count == 0
assert log_has(f'Unable to fetch order {open_order_id}: ', caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trade_state_sell(
default_conf_usdt, trades_for_order, limit_order_open, limit_order, is_short, mocker
):
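    # Closing the exit order via update_trade_state must close the trade, mark the
    # order as closed and trigger exactly one wallet update.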
buy_order = limit_order[entry_side(is_short)]
open_order = limit_order_open[exit_side(is_short)]
l_order = limit_order[exit_side(is_short)]
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
# fetch_order should not be called!!
mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=ValueError))
wallet_mock = MagicMock()
mocker.patch('freqtrade.wallets.Wallets.update', wallet_mock)
patch_exchange(mocker)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
amount = l_order["amount"]
wallet_mock.reset_mock()
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=0.0025,
fee_close=0.0025,
open_date=dt_now(),
is_open=True,
interest_rate=0.0005,
leverage=1,
is_short=is_short,
)
order = Order.parse_from_ccxt_object(buy_order, 'LTC/ETH', entry_side(is_short))
trade.orders.append(order)
order = Order.parse_from_ccxt_object(open_order, 'LTC/ETH', exit_side(is_short))
trade.orders.append(order)
assert order.status == 'open'
freqtrade.update_trade_state(trade, trade.open_orders_ids[-1], l_order)
assert trade.amount == l_order['amount']
# Wallet needs to be updated after closing a limit-sell order to reenable buying
assert wallet_mock.call_count == 1
assert not trade.is_open
# Order is updated by update_trade_state
assert order.status == 'closed'
@pytest.mark.parametrize('is_short,close_profit', [
(False, 0.09451372),
(True, 0.08635224),
])
def test_handle_trade(
default_conf_usdt, limit_order_open, limit_order, fee, mocker, is_short, close_profit
) -> None:
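    # Full round trip for long and short: the exit signal places the exit order, and
    # once it is filled the trade closes with the expected rate, profit and exit_reason.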
open_order = limit_order_open[exit_side(is_short)]
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 2.19,
'ask': 2.2,
'last': 2.19
}),
create_order=MagicMock(side_effect=[
enter_order,
open_order,
]),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
time.sleep(0.01) # Race condition fix
assert trade.is_open is True
freqtrade.wallets.update()
patch_get_signal(freqtrade, enter_long=False, exit_short=is_short,
exit_long=not is_short, exit_tag='sell_signal1')
assert freqtrade.handle_trade(trade) is True
assert trade.open_orders_ids[-1] == exit_order['id']
# Simulate fulfilled LIMIT_SELL order for trade
trade.orders[-1].ft_is_open = False
trade.orders[-1].status = 'closed'
trade.orders[-1].filled = trade.orders[-1].remaining
trade.orders[-1].remaining = 0.0
trade.update_trade(trade.orders[-1])
assert trade.close_rate == (2.0 if is_short else 2.2)
assert pytest.approx(trade.close_profit) == close_profit
assert pytest.approx(trade.calc_profit(trade.close_rate)) == 5.685
assert trade.close_date is not None
assert trade.exit_reason == 'sell_signal1'
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_overlapping_signals(
default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, is_short
) -> None:
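    # Conflicting enter/exit signals must cancel each other out: no trade is opened
    # or closed until only the exit signal remains.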
open_order = limit_order_open[exit_side(is_short)]
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(side_effect=[
open_order,
{'id': 1234553382},
]),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
if is_short:
patch_get_signal(freqtrade, enter_long=False, enter_short=True, exit_short=True)
else:
patch_get_signal(freqtrade, enter_long=True, exit_long=True)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
freqtrade.enter_positions()
# Buy and Sell triggering, so doing nothing ...
trades = Trade.session.scalars(select(Trade)).all()
nb_trades = len(trades)
assert nb_trades == 0
# Buy is triggering, so buying ...
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trades = Trade.session.scalars(select(Trade)).all()
for trade in trades:
trade.is_short = is_short
nb_trades = len(trades)
assert nb_trades == 1
assert trades[0].is_open is True
# Buy and Sell are not triggering, so doing nothing ...
patch_get_signal(freqtrade, enter_long=False)
assert freqtrade.handle_trade(trades[0]) is False
trades = Trade.session.scalars(select(Trade)).all()
for trade in trades:
trade.is_short = is_short
nb_trades = len(trades)
assert nb_trades == 1
assert trades[0].is_open is True
# Buy and Sell are triggering, so doing nothing ...
if is_short:
patch_get_signal(freqtrade, enter_long=False, enter_short=True, exit_short=True)
else:
patch_get_signal(freqtrade, enter_long=True, exit_long=True)
assert freqtrade.handle_trade(trades[0]) is False
trades = Trade.session.scalars(select(Trade)).all()
for trade in trades:
trade.is_short = is_short
nb_trades = len(trades)
assert nb_trades == 1
assert trades[0].is_open is True
    # Sell is triggering, guess what: we are selling!
if is_short:
patch_get_signal(freqtrade, enter_long=False, exit_short=True)
else:
patch_get_signal(freqtrade, enter_long=False, exit_long=True)
trades = Trade.session.scalars(select(Trade)).all()
for trade in trades:
trade.is_short = is_short
assert freqtrade.handle_trade(trades[0]) is True
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_trade_roi(default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, caplog,
is_short) -> None:
open_order = limit_order_open[entry_side(is_short)]
caplog.set_level(logging.DEBUG)
patch_RPCManager(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(side_effect=[
open_order,
{'id': 1234553382},
]),
get_fee=fee,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=True)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
    # FIX: this test sniffs logs; arguably handle_trade() should not call
    # execute_trade_exit() itself - that responsibility could be moved out of
    # handle_trade(), so we would only check whether an exit condition is met
    # without executing it.
# if ROI is reached we must sell
caplog.clear()
patch_get_signal(freqtrade)
assert freqtrade.handle_trade(trade)
assert log_has("ETH/USDT - Required profit reached. exit_type=ExitType.ROI",
caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_trade_use_exit_signal(
default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, caplog, is_short
) -> None:
enter_open_order = limit_order_open[exit_side(is_short)]
exit_open_order = limit_order_open[entry_side(is_short)]
    # use_exit_signal is True by default
caplog.set_level(logging.DEBUG)
patch_RPCManager(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(side_effect=[
enter_open_order,
exit_open_order,
]),
get_fee=fee,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
patch_get_signal(freqtrade, enter_long=False, exit_long=False)
assert not freqtrade.handle_trade(trade)
if is_short:
patch_get_signal(freqtrade, enter_long=False, exit_short=True)
else:
patch_get_signal(freqtrade, enter_long=False, exit_long=True)
assert freqtrade.handle_trade(trade)
assert log_has("ETH/USDT - Sell signal received. exit_type=ExitType.EXIT_SIGNAL",
caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_close_trade(
default_conf_usdt, ticker_usdt, limit_order_open, limit_order, fee, mocker, is_short
) -> None:
open_order = limit_order_open[exit_side(is_short)]
enter_order = limit_order[exit_side(is_short)]
exit_order = limit_order[entry_side(is_short)]
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=open_order),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create trade and sell it
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
oobj = Order.parse_from_ccxt_object(enter_order, enter_order['symbol'], trade.entry_side)
trade.update_trade(oobj)
oobj = Order.parse_from_ccxt_object(exit_order, exit_order['symbol'], trade.exit_side)
trade.update_trade(oobj)
assert trade.is_open is False
with pytest.raises(DependencyException, match=r'.*closed trade.*'):
freqtrade.handle_trade(trade)
def test_bot_loop_start_called_once(mocker, default_conf_usdt, caplog):
ftbot = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.create_trade')
patch_get_signal(ftbot)
ftbot.strategy.bot_loop_start = MagicMock(side_effect=ValueError)
ftbot.strategy.analyze = MagicMock()
ftbot.process()
assert log_has_re(r'Strategy caused the following exception.*', caplog)
assert ftbot.strategy.bot_loop_start.call_count == 1
assert ftbot.strategy.analyze.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_entry_usercustom(
default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
limit_sell_order_old, fee, mocker, is_short
) -> None:
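    # With a very long unfilledtimeout, cancelling a stale entry order is delegated to
    # the user's check_entry_timeout(): False and exceptions keep the order, True
    # cancels it and removes the trade.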
old_order = limit_sell_order_old if is_short else limit_buy_order_old
old_order['id'] = open_trade.open_orders_ids[0]
default_conf_usdt["unfilledtimeout"] = {"entry": 1400, "exit": 30}
rpc_mock = patch_RPCManager(mocker)
cancel_order_mock = MagicMock(return_value=old_order)
cancel_enter_order = deepcopy(old_order)
cancel_enter_order['status'] = 'canceled'
cancel_order_wr_mock = MagicMock(return_value=cancel_enter_order)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=old_order),
cancel_order=cancel_order_mock,
cancel_order_with_result=cancel_order_wr_mock,
get_fee=fee
)
freqtrade = FreqtradeBot(default_conf_usdt)
open_trade.is_short = is_short
open_trade.orders[0].side = 'sell' if is_short else 'buy'
open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy'
Trade.session.add(open_trade)
Trade.commit()
# Ensure default is to return empty (so not mocked yet)
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
# Return false - trade remains open
freqtrade.strategy.check_entry_timeout = MagicMock(return_value=False)
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_is_open.is_(True))
.where(Order.ft_order_side != "stoploss")
.where(Order.ft_trade_id == Trade.id)
).all()
nb_trades = len(trades)
assert nb_trades == 1
assert freqtrade.strategy.check_entry_timeout.call_count == 1
freqtrade.strategy.check_entry_timeout = MagicMock(side_effect=KeyError)
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_is_open.is_(True))
.where(Order.ft_order_side != "stoploss")
.where(Order.ft_trade_id == Trade.id)
).all()
nb_trades = len(trades)
assert nb_trades == 1
assert freqtrade.strategy.check_entry_timeout.call_count == 1
freqtrade.strategy.check_entry_timeout = MagicMock(return_value=True)
# Trade should be closed since the function returns true
freqtrade.manage_open_orders()
assert cancel_order_wr_mock.call_count == 1
assert rpc_mock.call_count == 2
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_is_open.is_(True))
.where(Order.ft_order_side != "stoploss")
.where(Order.ft_trade_id == Trade.id)
).all()
nb_trades = len(trades)
assert nb_trades == 0
assert freqtrade.strategy.check_entry_timeout.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_entry(
default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
limit_sell_order_old, fee, mocker, is_short
) -> None:
old_order = limit_sell_order_old if is_short else limit_buy_order_old
rpc_mock = patch_RPCManager(mocker)
order = Order.parse_from_ccxt_object(old_order, 'mocked', 'buy')
open_trade.orders[0] = order
limit_buy_cancel = deepcopy(old_order)
limit_buy_cancel['status'] = 'canceled'
cancel_order_mock = MagicMock(return_value=limit_buy_cancel)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=old_order),
cancel_order_with_result=cancel_order_mock,
get_fee=fee
)
freqtrade = FreqtradeBot(default_conf_usdt)
open_trade.is_short = is_short
Trade.session.add(open_trade)
Trade.commit()
freqtrade.strategy.check_entry_timeout = MagicMock(return_value=False)
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1234)
# check it does cancel entry orders over the time limit
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 1
assert rpc_mock.call_count == 2
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_is_open.is_(True))
.where(Order.ft_order_side != "stoploss")
.where(Order.ft_trade_id == Trade.id)
).all()
nb_trades = len(trades)
assert nb_trades == 0
# Custom user entry-timeout is never called
assert freqtrade.strategy.check_entry_timeout.call_count == 0
# Entry adjustment is never called
assert freqtrade.strategy.adjust_entry_price.call_count == 0
@pytest.mark.parametrize("is_short", [False, True])
def test_adjust_entry_cancel(
default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
limit_sell_order_old, fee, mocker, caplog, is_short
) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
old_order = limit_sell_order_old if is_short else limit_buy_order_old
old_order['id'] = open_trade.open_orders[0].order_id
limit_buy_cancel = deepcopy(old_order)
limit_buy_cancel['status'] = 'canceled'
cancel_order_mock = MagicMock(return_value=limit_buy_cancel)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=old_order),
cancel_order_with_result=cancel_order_mock,
get_fee=fee
)
open_trade.is_short = is_short
Trade.session.add(open_trade)
Trade.commit()
    # Disable the timeout check so it does not interfere
freqtrade.strategy.ft_check_timed_out = MagicMock(return_value=False)
# check that order is cancelled
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=None)
freqtrade.manage_open_orders()
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_trade_id == Trade.id)
).all()
assert len(trades) == 0
assert len(Order.session.scalars(select(Order)).all()) == 0
assert log_has_re(
f"{'Sell' if is_short else 'Buy'} order user requested order cancel*", caplog)
assert log_has_re(
f"{'Sell' if is_short else 'Buy'} order fully cancelled.*", caplog)
# Entry adjustment is called
assert freqtrade.strategy.adjust_entry_price.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_adjust_entry_maintain_replace(
default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
limit_sell_order_old, fee, mocker, caplog, is_short
) -> None:
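    # adjust_entry_price() returning the current order price keeps the order open,
    # while returning a new price cancels it and places a replacement order.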
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
old_order = limit_sell_order_old if is_short else limit_buy_order_old
old_order['id'] = open_trade.open_orders_ids[0]
limit_buy_cancel = deepcopy(old_order)
limit_buy_cancel['status'] = 'canceled'
cancel_order_mock = MagicMock(return_value=limit_buy_cancel)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=old_order),
cancel_order_with_result=cancel_order_mock,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
open_trade.is_short = is_short
Trade.session.add(open_trade)
Trade.commit()
# Timeout to not interfere
freqtrade.strategy.ft_check_timed_out = MagicMock(return_value=False)
# Check that order is maintained
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=old_order['price'])
freqtrade.manage_open_orders()
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_is_open.is_(True))
.where(Order.ft_trade_id == Trade.id)
).all()
assert len(trades) == 1
assert len(Order.get_open_orders()) == 1
# Entry adjustment is called
assert freqtrade.strategy.adjust_entry_price.call_count == 1
# Check that order is replaced
freqtrade.get_valid_enter_price_and_stake = MagicMock(return_value={100, 10, 1})
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1234)
freqtrade.manage_open_orders()
assert freqtrade.strategy.adjust_entry_price.call_count == 1
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_is_open.is_(True))
.where(Order.ft_trade_id == Trade.id)
).all()
assert len(trades) == 1
nb_all_orders = len(Order.session.scalars(select(Order)).all())
assert nb_all_orders == 2
# New order seems to be in closed status?
# nb_open_orders = len(Order.get_open_orders())
# assert nb_open_orders == 1
assert log_has_re(
f"{'Sell' if is_short else 'Buy'} order cancelled to be replaced*", caplog)
# Entry adjustment is called
assert freqtrade.strategy.adjust_entry_price.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_check_handle_cancelled_buy(
default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
limit_sell_order_old, fee, mocker, caplog, is_short
) -> None:
""" Handle Buy order cancelled on exchange"""
old_order = limit_sell_order_old if is_short else limit_buy_order_old
rpc_mock = patch_RPCManager(mocker)
cancel_order_mock = MagicMock()
patch_exchange(mocker)
old_order.update({"status": "canceled", 'filled': 0.0})
old_order['side'] = 'buy' if is_short else 'sell'
old_order['id'] = open_trade.open_orders[0].order_id
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=old_order),
cancel_order=cancel_order_mock,
get_fee=fee
)
freqtrade = FreqtradeBot(default_conf_usdt)
open_trade.is_short = is_short
Trade.session.add(open_trade)
Trade.commit()
# check it does cancel buy orders over the time limit
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
assert rpc_mock.call_count == 2
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_is_open.is_(True))
.where(Order.ft_trade_id == Trade.id)
).all()
assert len(trades) == 0
exit_name = 'Buy' if is_short else 'Sell'
assert log_has_re(f"{exit_name} order cancelled on exchange for Trade.*", caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_buy_exception(
default_conf_usdt, ticker_usdt, open_trade, is_short, fee, mocker
) -> None:
rpc_mock = patch_RPCManager(mocker)
cancel_order_mock = MagicMock()
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
validate_pairs=MagicMock(),
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(side_effect=ExchangeError),
cancel_order=cancel_order_mock,
get_fee=fee
)
freqtrade = FreqtradeBot(default_conf_usdt)
open_trade.is_short = is_short
Trade.session.add(open_trade)
Trade.commit()
# check it does cancel buy orders over the time limit
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
assert rpc_mock.call_count == 1
assert len(open_trade.open_orders) == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_exit_usercustom(
default_conf_usdt, ticker_usdt, limit_sell_order_old, mocker,
is_short, open_trade_usdt, caplog
) -> None:
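    # Exit-order timeouts are delegated to check_exit_timeout(); after a second
    # cancelled exit an emergency exit is attempted - failures there are only logged,
    # and a failed cancel skips the emergency exit entirely.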
default_conf_usdt["unfilledtimeout"] = {"entry": 1440, "exit": 1440, "exit_timeout_count": 1}
if is_short:
limit_sell_order_old['side'] = 'buy'
open_trade_usdt.is_short = is_short
open_exit_order = Order.parse_from_ccxt_object(limit_sell_order_old, 'mocked',
'buy' if is_short else 'sell')
open_trade_usdt.orders[-1] = open_exit_order
rpc_mock = patch_RPCManager(mocker)
cancel_order_mock = MagicMock()
patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.0)
et_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit')
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=limit_sell_order_old),
cancel_order=cancel_order_mock
)
freqtrade = FreqtradeBot(default_conf_usdt)
open_trade_usdt.open_date = dt_now() - timedelta(hours=5)
open_trade_usdt.close_date = dt_now() - timedelta(minutes=601)
open_trade_usdt.close_profit_abs = 0.001
Trade.session.add(open_trade_usdt)
Trade.commit()
# Ensure default is false
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
freqtrade.strategy.check_exit_timeout = MagicMock(return_value=False)
freqtrade.strategy.check_entry_timeout = MagicMock(return_value=False)
# Return false - No impact
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
assert rpc_mock.call_count == 1
assert freqtrade.strategy.check_exit_timeout.call_count == 1
assert freqtrade.strategy.check_entry_timeout.call_count == 0
freqtrade.strategy.check_exit_timeout = MagicMock(side_effect=KeyError)
freqtrade.strategy.check_entry_timeout = MagicMock(side_effect=KeyError)
# Return Error - No impact
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
assert rpc_mock.call_count == 1
assert freqtrade.strategy.check_exit_timeout.call_count == 1
assert freqtrade.strategy.check_entry_timeout.call_count == 0
# Return True - sells!
freqtrade.strategy.check_exit_timeout = MagicMock(return_value=True)
freqtrade.strategy.check_entry_timeout = MagicMock(return_value=True)
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 1
assert rpc_mock.call_count == 2
assert freqtrade.strategy.check_exit_timeout.call_count == 1
assert freqtrade.strategy.check_entry_timeout.call_count == 0
# 2nd canceled trade - Fail execute exit
caplog.clear()
mocker.patch('freqtrade.persistence.Trade.get_exit_order_count', return_value=1)
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit',
side_effect=DependencyException)
freqtrade.manage_open_orders()
assert log_has_re('Unable to emergency exit .*', caplog)
et_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit')
caplog.clear()
# 2nd canceled trade ...
# If cancelling fails - no emergency exit!
with patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit', return_value=False):
freqtrade.manage_open_orders()
assert et_mock.call_count == 0
freqtrade.manage_open_orders()
assert log_has_re('Emergency exiting trade.*', caplog)
assert et_mock.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_exit(
default_conf_usdt, ticker_usdt, limit_sell_order_old, mocker, is_short, open_trade_usdt
) -> None:
rpc_mock = patch_RPCManager(mocker)
cancel_order_mock = MagicMock()
limit_sell_order_old['id'] = '123456789_exit'
limit_sell_order_old['side'] = 'buy' if is_short else 'sell'
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=limit_sell_order_old),
cancel_order=cancel_order_mock,
get_min_pair_stake_amount=MagicMock(return_value=0),
)
freqtrade = FreqtradeBot(default_conf_usdt)
open_trade_usdt.open_date = dt_now() - timedelta(hours=5)
open_trade_usdt.close_date = dt_now() - timedelta(minutes=601)
open_trade_usdt.close_profit_abs = 0.001
open_trade_usdt.is_short = is_short
Trade.session.add(open_trade_usdt)
Trade.commit()
freqtrade.strategy.check_exit_timeout = MagicMock(return_value=False)
freqtrade.strategy.check_entry_timeout = MagicMock(return_value=False)
# check it does cancel sell orders over the time limit
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 1
assert rpc_mock.call_count == 2
assert open_trade_usdt.is_open is True
# Custom user sell-timeout is never called
assert freqtrade.strategy.check_exit_timeout.call_count == 0
assert freqtrade.strategy.check_entry_timeout.call_count == 0
@pytest.mark.parametrize("is_short", [False, True])
def test_check_handle_cancelled_exit(
default_conf_usdt, ticker_usdt, limit_sell_order_old, open_trade_usdt,
is_short, mocker, caplog
) -> None:
""" Handle sell order cancelled on exchange"""
rpc_mock = patch_RPCManager(mocker)
cancel_order_mock = MagicMock()
limit_sell_order_old.update({"status": "canceled", 'filled': 0.0})
limit_sell_order_old['side'] = 'buy' if is_short else 'sell'
limit_sell_order_old['id'] = open_trade_usdt.open_orders[0].order_id
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=limit_sell_order_old),
cancel_order_with_result=cancel_order_mock
)
freqtrade = FreqtradeBot(default_conf_usdt)
open_trade_usdt.open_date = dt_now() - timedelta(hours=5)
open_trade_usdt.close_date = dt_now() - timedelta(minutes=601)
open_trade_usdt.is_short = is_short
Trade.session.add(open_trade_usdt)
Trade.commit()
# check it does cancel sell orders over the time limit
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 0
assert rpc_mock.call_count == 2
assert open_trade_usdt.is_open is True
exit_name = 'Buy' if is_short else 'Sell'
assert log_has_re(f"{exit_name} order cancelled on exchange for Trade.*", caplog)
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize("leverage", [1, 3, 5, 10])
def test_manage_open_orders_partial(
default_conf_usdt, ticker_usdt, limit_buy_order_old_partial, is_short, leverage,
open_trade, mocker
) -> None:
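    # Cancelling a partially filled entry keeps the trade open with the filled amount
    # and recalculates the stake from open_rate, amount and leverage.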
rpc_mock = patch_RPCManager(mocker)
open_trade.is_short = is_short
open_trade.leverage = leverage
open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy'
limit_buy_order_old_partial['id'] = open_trade.orders[0].order_id
limit_buy_order_old_partial['side'] = 'sell' if is_short else 'buy'
limit_buy_canceled = deepcopy(limit_buy_order_old_partial)
limit_buy_canceled['status'] = 'canceled'
cancel_order_mock = MagicMock(return_value=limit_buy_canceled)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=limit_buy_order_old_partial),
cancel_order_with_result=cancel_order_mock
)
freqtrade = FreqtradeBot(default_conf_usdt)
prior_stake = open_trade.stake_amount
Trade.session.add(open_trade)
Trade.commit()
# check it does cancel buy orders over the time limit
# note this is for a partially-complete buy order
freqtrade.manage_open_orders()
assert cancel_order_mock.call_count == 1
assert rpc_mock.call_count == 3
trades = Trade.session.scalars(
select(Trade)
).all()
assert len(trades) == 1
assert trades[0].amount == 23.0
assert trades[0].stake_amount == open_trade.open_rate * trades[0].amount / leverage
assert trades[0].stake_amount != prior_stake
assert not trades[0].has_open_orders
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_partial_fee(
default_conf_usdt, ticker_usdt, open_trade, caplog, fee, is_short,
limit_buy_order_old_partial, trades_for_order,
limit_buy_order_old_partial_canceled, mocker
) -> None:
open_trade.is_short = is_short
open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy'
rpc_mock = patch_RPCManager(mocker)
limit_buy_order_old_partial['id'] = open_trade.orders[0].order_id
limit_buy_order_old_partial_canceled['id'] = open_trade.open_orders_ids[0]
limit_buy_order_old_partial['side'] = 'sell' if is_short else 'buy'
limit_buy_order_old_partial_canceled['side'] = 'sell' if is_short else 'buy'
cancel_order_mock = MagicMock(return_value=limit_buy_order_old_partial_canceled)
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=0))
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=limit_buy_order_old_partial),
cancel_order_with_result=cancel_order_mock,
get_trades_for_order=MagicMock(return_value=trades_for_order),
)
freqtrade = FreqtradeBot(default_conf_usdt)
assert open_trade.amount == limit_buy_order_old_partial['amount']
open_trade.fee_open = fee()
open_trade.fee_close = fee()
Trade.session.add(open_trade)
Trade.commit()
# cancelling a half-filled order should update the amount to the bought amount
# and apply fees if necessary.
freqtrade.manage_open_orders()
assert log_has_re(r"Applying fee on amount for Trade.*", caplog)
assert cancel_order_mock.call_count == 1
assert rpc_mock.call_count == 3
trades = Trade.session.scalars(
select(Trade)
.where(Order.ft_trade_id == Trade.id)
).all()
assert len(trades) == 1
# Verify that trade has been updated
assert trades[0].amount == (limit_buy_order_old_partial['amount'] -
limit_buy_order_old_partial['remaining']) - 0.023
assert not trades[0].has_open_orders
assert trades[0].fee_updated(open_trade.entry_side)
assert pytest.approx(trades[0].fee_open) == 0.001
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_partial_except(
default_conf_usdt, ticker_usdt, open_trade, caplog, fee, is_short,
limit_buy_order_old_partial, trades_for_order,
limit_buy_order_old_partial_canceled, mocker
) -> None:
open_trade.is_short = is_short
open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy'
rpc_mock = patch_RPCManager(mocker)
limit_buy_order_old_partial_canceled['id'] = open_trade.open_orders_ids[0]
limit_buy_order_old_partial['id'] = open_trade.open_orders_ids[0]
if is_short:
limit_buy_order_old_partial['side'] = 'sell'
cancel_order_mock = MagicMock(return_value=limit_buy_order_old_partial_canceled)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(return_value=limit_buy_order_old_partial),
cancel_order_with_result=cancel_order_mock,
get_trades_for_order=MagicMock(return_value=trades_for_order),
)
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount',
MagicMock(side_effect=DependencyException))
freqtrade = FreqtradeBot(default_conf_usdt)
assert open_trade.amount == limit_buy_order_old_partial['amount']
open_trade.fee_open = fee()
open_trade.fee_close = fee()
Trade.session.add(open_trade)
Trade.commit()
# cancelling a half-filled order should update the amount to the bought amount
# and apply fees if necessary.
freqtrade.manage_open_orders()
assert log_has_re(r"Could not update trade amount: .*", caplog)
assert cancel_order_mock.call_count == 1
assert rpc_mock.call_count == 3
trades = Trade.session.scalars(
select(Trade)
).all()
assert len(trades) == 1
# Verify that trade has been updated
assert trades[0].amount == (limit_buy_order_old_partial['amount'] -
limit_buy_order_old_partial['remaining'])
assert not trades[0].has_open_orders
assert trades[0].fee_open == fee()
def test_manage_open_orders_exception(default_conf_usdt, ticker_usdt, open_trade_usdt, mocker,
caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
cancel_order_mock = MagicMock()
mocker.patch.multiple(
'freqtrade.freqtradebot.FreqtradeBot',
handle_cancel_enter=MagicMock(),
handle_cancel_exit=MagicMock(),
)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
fetch_order=MagicMock(side_effect=ExchangeError('Oh snap')),
cancel_order=cancel_order_mock
)
freqtrade = FreqtradeBot(default_conf_usdt)
Trade.session.add(open_trade_usdt)
Trade.commit()
caplog.clear()
freqtrade.manage_open_orders()
assert log_has_re(r"Cannot query order for Trade\(id=1, pair=ADA/USDT, amount=30.00000000, "
r"is_short=False, leverage=1.0, "
r"open_rate=2.00000000, open_since="
f"{open_trade_usdt.open_date.strftime('%Y-%m-%d %H:%M:%S')}"
r"\) due to Traceback \(most recent call last\):\n*",
caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_short, fee) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
l_order = limit_order[entry_side(is_short)]
cancel_buy_order = deepcopy(limit_order[entry_side(is_short)])
cancel_buy_order['status'] = 'canceled'
del cancel_buy_order['filled']
cancel_order_mock = MagicMock(return_value=cancel_buy_order)
mocker.patch(f'{EXMS}.cancel_order_with_result', cancel_order_mock)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade._notify_enter_cancel = MagicMock()
trade = mock_trade_usdt_4(fee, is_short)
Trade.session.add(trade)
Trade.commit()
l_order['filled'] = 0.0
l_order['status'] = 'open'
reason = CANCEL_REASON['TIMEOUT']
assert freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
assert cancel_order_mock.call_count == 1
cancel_order_mock.reset_mock()
caplog.clear()
l_order['filled'] = 0.01
assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
assert cancel_order_mock.call_count == 0
assert log_has_re("Order .* for .* not cancelled, as the filled amount.* unexitable.*", caplog)
caplog.clear()
cancel_order_mock.reset_mock()
l_order['filled'] = 2
assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
assert cancel_order_mock.call_count == 1
# Order remained open for some reason (cancel failed)
cancel_buy_order['status'] = 'open'
cancel_order_mock = MagicMock(return_value=cancel_buy_order)
mocker.patch(f'{EXMS}.cancel_order_with_result', cancel_order_mock)
assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
assert log_has_re(r"Order .* for .* not cancelled.", caplog)
# min_pair_stake empty should not crash
mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=None)
assert not freqtrade.handle_cancel_enter(
trade, limit_order[entry_side(is_short)], trade.open_orders_ids[0], reason
)
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize("limit_buy_order_canceled_empty", ['binance', 'kraken', 'bittrex'],
indirect=['limit_buy_order_canceled_empty'])
def test_handle_cancel_enter_exchanges(mocker, caplog, default_conf_usdt, is_short, fee,
limit_buy_order_canceled_empty) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
cancel_order_mock = mocker.patch(
f'{EXMS}.cancel_order_with_result',
return_value=limit_buy_order_canceled_empty)
notify_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot._notify_enter_cancel')
freqtrade = FreqtradeBot(default_conf_usdt)
reason = CANCEL_REASON['TIMEOUT']
trade = mock_trade_usdt_4(fee, is_short)
Trade.session.add(trade)
Trade.commit()
assert freqtrade.handle_cancel_enter(
trade, limit_buy_order_canceled_empty, trade.open_orders_ids[0], reason
)
assert cancel_order_mock.call_count == 0
assert log_has_re(
f'{trade.entry_side.capitalize()} order fully cancelled. '
r'Removing .* from database\.',
caplog
)
assert notify_mock.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize('cancelorder', [
{},
{'remaining': None},
'String Return value',
123
])
def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order, is_short, fee,
cancelorder) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
l_order = limit_order[entry_side(is_short)]
cancel_order_mock = MagicMock(return_value=cancelorder)
mocker.patch.multiple(
EXMS,
cancel_order=cancel_order_mock,
fetch_order=MagicMock(side_effect=InvalidOrderException)
)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade._notify_enter_cancel = MagicMock()
trade = mock_trade_usdt_4(fee, is_short)
Trade.session.add(trade)
Trade.commit()
l_order['filled'] = 0.0
l_order['status'] = 'open'
reason = CANCEL_REASON['TIMEOUT']
assert freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
assert cancel_order_mock.call_count == 1
cancel_order_mock.reset_mock()
l_order['filled'] = 1.0
order = deepcopy(l_order)
order['status'] = 'canceled'
mocker.patch(f'{EXMS}.fetch_order', return_value=order)
assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
assert cancel_order_mock.call_count == 1
@pytest.mark.parametrize('is_short', [True, False])
@pytest.mark.parametrize('leverage', [1, 5])
@pytest.mark.parametrize('amount', [2, 50])
def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee, is_short,
leverage, amount) -> None:
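    # handle_cancel_exit: an unfilled exit order is cancelled and the exit reason reset,
    # an almost fully filled one is kept open (without repeating the RPC message),
    # while a half-filled one can still be cancelled.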
send_msg_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
cancel_order_mock = MagicMock()
mocker.patch.multiple(
EXMS,
cancel_order=cancel_order_mock,
)
entry_price = 0.245441
mocker.patch(f'{EXMS}.get_rate', return_value=entry_price)
mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.2)
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_order_fee')
freqtrade = FreqtradeBot(default_conf_usdt)
trade = Trade(
pair='LTC/USDT',
amount=amount * leverage,
exchange='binance',
open_rate=entry_price,
open_date=dt_now() - timedelta(days=2),
fee_open=fee.return_value,
fee_close=fee.return_value,
close_rate=0.555,
close_date=dt_now(),
exit_reason="sell_reason_whatever",
stake_amount=entry_price * amount,
leverage=leverage,
is_short=is_short,
)
trade.orders = [
Order(
ft_order_side=entry_side(is_short),
ft_pair=trade.pair,
ft_is_open=False,
order_id='buy_123456',
status="closed",
symbol=trade.pair,
order_type="market",
side=entry_side(is_short),
price=trade.open_rate,
average=trade.open_rate,
filled=trade.amount,
remaining=0,
cost=trade.open_rate * trade.amount,
order_date=trade.open_date,
order_filled_date=trade.open_date,
),
Order(
ft_order_side=exit_side(is_short),
ft_pair=trade.pair,
ft_is_open=True,
order_id='sell_123456',
status="open",
symbol=trade.pair,
order_type="limit",
side=exit_side(is_short),
price=trade.open_rate,
average=trade.open_rate,
filled=0.0,
remaining=trade.amount,
cost=trade.open_rate * trade.amount,
order_date=trade.open_date,
order_filled_date=trade.open_date,
),
]
order = {'id': "sell_123456",
'remaining': 1,
'amount': 1,
'status': "open"}
reason = CANCEL_REASON['TIMEOUT']
send_msg_mock.reset_mock()
assert freqtrade.handle_cancel_exit(trade, order, order['id'], reason)
assert cancel_order_mock.call_count == 1
assert send_msg_mock.call_count == 1
assert trade.close_rate is None
assert trade.exit_reason is None
assert not trade.has_open_orders
send_msg_mock.reset_mock()
# Partial exit - below exit threshold
order['amount'] = amount * leverage
order['filled'] = amount * 0.99 * leverage
assert not freqtrade.handle_cancel_exit(trade, order, order['id'], reason)
# Assert cancel_order was not called (callcount remains unchanged)
assert cancel_order_mock.call_count == 1
assert send_msg_mock.call_count == 1
assert (send_msg_mock.call_args_list[0][0][0]['reason']
== CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN'])
assert not freqtrade.handle_cancel_exit(trade, order, order['id'], reason)
assert (send_msg_mock.call_args_list[0][0][0]['reason']
== CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN'])
    # The cancel message should not be sent again
assert trade.exit_order_status == CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']
assert send_msg_mock.call_count == 1
send_msg_mock.reset_mock()
order['filled'] = amount * 0.5 * leverage
assert freqtrade.handle_cancel_exit(trade, order, order['id'], reason)
assert send_msg_mock.call_count == 1
assert (send_msg_mock.call_args_list[0][0][0]['reason']
== CANCEL_REASON['PARTIALLY_FILLED'])
def test_handle_cancel_exit_cancel_exception(mocker, default_conf_usdt) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.0)
mocker.patch(f'{EXMS}.cancel_order_with_result', side_effect=InvalidOrderException())
freqtrade = FreqtradeBot(default_conf_usdt)
# TODO: should not be magicmock
trade = MagicMock()
order_id = '125'
reason = CANCEL_REASON['TIMEOUT']
order = {'remaining': 1,
'id': '125',
'amount': 1,
'status': "open"}
assert not freqtrade.handle_cancel_exit(trade, order, order_id, reason)
# mocker.patch(f'{EXMS}.cancel_order_with_result', return_value=order)
# assert not freqtrade.handle_cancel_exit(trade, order, reason)
@pytest.mark.parametrize("is_short, open_rate, amt", [
(False, 2.0, 30.0),
(True, 2.02, 29.70297029),
])
def test_execute_trade_exit_up(default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, mocker,
ticker_usdt_sell_down, is_short, open_rate, amt) -> None:
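    # confirm_trade_exit() returning False must veto the exit (it receives a copy of
    # the trade); once it returns True the exit is executed and the RPC message carries
    # the expected profit figures.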
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=False)
# Create some test data
freqtrade.enter_positions()
rpc_mock.reset_mock()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
assert trade
assert freqtrade.strategy.confirm_trade_exit.call_count == 0
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_down if is_short else ticker_usdt_sell_up
)
# Prevented sell ...
freqtrade.execute_trade_exit(
trade=trade,
limit=(ticker_usdt_sell_down()['ask'] if is_short else ticker_usdt_sell_up()['bid']),
exit_check=ExitCheckTuple(exit_type=ExitType.ROI)
)
assert rpc_mock.call_count == 0
assert freqtrade.strategy.confirm_trade_exit.call_count == 1
assert id(freqtrade.strategy.confirm_trade_exit.call_args_list[0][1]['trade']) != id(trade)
assert freqtrade.strategy.confirm_trade_exit.call_args_list[0][1]['trade'].id == trade.id
# Repatch with true
freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=True)
freqtrade.execute_trade_exit(
trade=trade,
limit=(ticker_usdt_sell_down()['ask'] if is_short else ticker_usdt_sell_up()['bid']),
exit_check=ExitCheckTuple(exit_type=ExitType.ROI)
)
assert freqtrade.strategy.confirm_trade_exit.call_count == 1
assert rpc_mock.call_count == 1
last_msg = rpc_mock.call_args_list[-1][0][0]
assert {
'trade_id': 1,
'type': RPCMessageType.EXIT,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'gain': 'profit',
'limit': 2.0 if is_short else 2.2,
'order_rate': 2.0 if is_short else 2.2,
'amount': pytest.approx(amt),
'order_type': 'limit',
'buy_tag': None,
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'enter_tag': None,
'open_rate': open_rate,
'current_rate': 2.01 if is_short else 2.3,
'profit_amount': 0.29554455 if is_short else 5.685,
'profit_ratio': 0.00493809 if is_short else 0.09451372,
'stake_currency': 'USDT',
'fiat_currency': 'USD',
'base_currency': 'ETH',
'sell_reason': ExitType.ROI.value,
'exit_reason': ExitType.ROI.value,
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_trade_exit_down(default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_down,
ticker_usdt_sell_up, mocker, is_short) -> None:
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
# Decrease the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up if is_short else ticker_usdt_sell_down
)
freqtrade.execute_trade_exit(
trade=trade, limit=(ticker_usdt_sell_up if is_short else ticker_usdt_sell_down)()['bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS))
assert rpc_mock.call_count == 2
last_msg = rpc_mock.call_args_list[-1][0][0]
assert {
'type': RPCMessageType.EXIT,
'trade_id': 1,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'gain': 'loss',
'limit': 2.2 if is_short else 2.01,
'order_rate': 2.2 if is_short else 2.01,
'amount': pytest.approx(29.70297029) if is_short else 30.0,
'order_type': 'limit',
'buy_tag': None,
'enter_tag': None,
'open_rate': 2.02 if is_short else 2.0,
'current_rate': 2.2 if is_short else 2.0,
'profit_amount': -5.65990099 if is_short else -0.00075,
'profit_ratio': -0.0945681 if is_short else -1.247e-05,
'stake_currency': 'USDT',
'base_currency': 'ETH',
'fiat_currency': 'USD',
'sell_reason': ExitType.STOP_LOSS.value,
'exit_reason': ExitType.STOP_LOSS.value,
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
@pytest.mark.parametrize(
"is_short,amount,open_rate,current_rate,limit,profit_amount,profit_ratio,profit_or_loss", [
(False, 30, 2.0, 2.3, 2.25, 7.18125, 0.11938903, 'profit'),
(True, 29.70297029, 2.02, 2.2, 2.25, -7.14876237, -0.11944465, 'loss'),
])
def test_execute_trade_exit_custom_exit_price(
default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, is_short, amount, open_rate,
current_rate, limit, profit_amount, profit_ratio, profit_or_loss, mocker) -> None:
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
config = deepcopy(default_conf_usdt)
config['custom_price_max_distance_ratio'] = 0.1
patch_whitelist(mocker, config)
freqtrade = FreqtradeBot(config)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=False)
# Create some test data
freqtrade.enter_positions()
rpc_mock.reset_mock()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
assert freqtrade.strategy.confirm_trade_exit.call_count == 0
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up
)
freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=True)
# Set a custom exit price
freqtrade.strategy.custom_exit_price = lambda **kwargs: 2.25
freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL, exit_reason='foo')
)
# Sell price must be different from the default bid price
assert freqtrade.strategy.confirm_trade_exit.call_count == 1
assert rpc_mock.call_count == 1
last_msg = rpc_mock.call_args_list[-1][0][0]
assert {
'trade_id': 1,
'type': RPCMessageType.EXIT,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'gain': profit_or_loss,
'limit': limit,
'order_rate': limit,
'amount': pytest.approx(amount),
'order_type': 'limit',
'buy_tag': None,
'enter_tag': None,
'open_rate': open_rate,
'current_rate': current_rate,
'profit_amount': pytest.approx(profit_amount),
'profit_ratio': profit_ratio,
'stake_currency': 'USDT',
'base_currency': 'ETH',
'fiat_currency': 'USD',
'sell_reason': 'foo',
'exit_reason': 'foo',
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_trade_exit_down_stoploss_on_exchange_dry_run(
default_conf_usdt, ticker_usdt, fee, is_short, ticker_usdt_sell_down,
ticker_usdt_sell_up, mocker) -> None:
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
assert trade
# Decrease the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up if is_short else ticker_usdt_sell_down
)
default_conf_usdt['dry_run'] = True
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
# Set the stoploss price 1% away from the open rate
trade.stop_loss = 2.0 * 1.01 if is_short else 2.0 * 0.99
freqtrade.execute_trade_exit(
trade=trade, limit=trade.stop_loss,
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS))
assert rpc_mock.call_count == 2
last_msg = rpc_mock.call_args_list[-1][0][0]
assert {
'type': RPCMessageType.EXIT,
'trade_id': 1,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'gain': 'loss',
'limit': 2.02 if is_short else 1.98,
'order_rate': 2.02 if is_short else 1.98,
'amount': pytest.approx(29.70297029 if is_short else 30.0),
'order_type': 'limit',
'buy_tag': None,
'enter_tag': None,
'open_rate': 2.02 if is_short else 2.0,
'current_rate': 2.2 if is_short else 2.0,
'profit_amount': -0.3 if is_short else -0.8985,
'profit_ratio': -0.00501253 if is_short else -0.01493766,
'stake_currency': 'USDT',
'fiat_currency': 'USD',
'base_currency': 'ETH',
'sell_reason': ExitType.STOP_LOSS.value,
'exit_reason': ExitType.STOP_LOSS.value,
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
def test_execute_trade_exit_sloe_cancel_exception(
mocker, default_conf_usdt, ticker_usdt, fee, caplog) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch(f'{EXMS}.cancel_stoploss_order', side_effect=InvalidOrderException())
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=300))
create_order_mock = MagicMock(side_effect=[
{'id': '12345554'},
{'id': '12345555'},
])
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
create_order=create_order_mock,
)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
patch_get_signal(freqtrade)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
PairLock.session = MagicMock()
freqtrade.config['dry_run'] = False
trade.stoploss_order_id = "abcd"
freqtrade.execute_trade_exit(trade=trade, limit=1234,
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS))
assert create_order_mock.call_count == 2
assert log_has('Could not cancel stoploss order abcd for pair ETH/USDT', caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_trade_exit_with_stoploss_on_exchange(
default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, is_short, mocker) -> None:
default_conf_usdt['exchange']['name'] = 'binance'
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
stoploss = MagicMock(return_value={
'id': 123,
'status': 'open',
'info': {
'foo': 'bar'
}
})
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_order_fee')
cancel_order = MagicMock(return_value=True)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
amount_to_precision=lambda s, x, y: y,
price_to_precision=lambda s, x, y: y,
create_stoploss=stoploss,
cancel_stoploss_order=cancel_order,
_dry_is_price_crossed=MagicMock(side_effect=[True, False]),
)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
trades = [trade]
freqtrade.manage_open_orders()
freqtrade.exit_positions(trades)
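# exit_positions places the stoploss order on the exchange once the entry is filled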
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up
)
freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS)
)
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
assert cancel_order.call_count == 1
assert rpc_mock.call_count == 4
@pytest.mark.parametrize("is_short", [False, True])
def test_may_execute_trade_exit_after_stoploss_on_exchange_hit(
default_conf_usdt, ticker_usdt, fee, mocker, is_short) -> None:
default_conf_usdt['exchange']['name'] = 'binance'
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
amount_to_precision=lambda s, x, y: y,
price_to_precision=lambda s, x, y: y,
_dry_is_price_crossed=MagicMock(side_effect=[False, True]),
)
stoploss = MagicMock(return_value={
'id': 123,
'info': {
'foo': 'bar'
}
})
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
patch_get_signal(freqtrade, enter_long=not is_short, enter_short=is_short)
# Create some test data
freqtrade.enter_positions()
freqtrade.manage_open_orders()
trade = Trade.session.scalars(select(Trade)).first()
trades = [trade]
assert trade.stoploss_order_id is None
freqtrade.exit_positions(trades)
assert trade
assert trade.stoploss_order_id == '123'
assert not trade.has_open_orders
# Assuming stoploss on exchange is hit
# stoploss_order_id should become None
# and trade should be sold at the price of stoploss
stoploss_executed = MagicMock(return_value={
"id": "123",
"timestamp": 1542707426845,
"datetime": "2018-11-20T09:50:26.845Z",
"lastTradeTimestamp": None,
"symbol": "BTC/USDT",
"type": "stop_loss_limit",
"side": "buy" if is_short else "sell",
"price": 1.08801,
"amount": trade.amount,
"cost": 1.08801 * trade.amount,
"average": 1.08801,
"filled": trade.amount,
"remaining": 0.0,
"status": "closed",
"fee": None,
"trades": None
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_executed)
freqtrade.exit_positions(trades)
assert trade.stoploss_order_id is None
assert trade.is_open is False
assert trade.exit_reason == ExitType.STOPLOSS_ON_EXCHANGE.value
assert rpc_mock.call_count == 4
assert rpc_mock.call_args_list[1][0][0]['type'] == RPCMessageType.ENTRY
assert rpc_mock.call_args_list[1][0][0]['amount'] > 20
assert rpc_mock.call_args_list[2][0][0]['type'] == RPCMessageType.ENTRY_FILL
assert rpc_mock.call_args_list[3][0][0]['type'] == RPCMessageType.EXIT_FILL
@pytest.mark.parametrize(
"is_short,amount,current_rate,limit,profit_amount,profit_ratio,profit_or_loss", [
(False, 30, 2.3, 2.2, 5.685, 0.09451372, 'profit'),
(True, 29.70297029, 2.2, 2.3, -8.63762376, -0.1443212, 'loss'),
])
def test_execute_trade_exit_market_order(
default_conf_usdt, ticker_usdt, fee, is_short, current_rate, amount, caplog,
limit, profit_amount, profit_ratio, profit_or_loss, ticker_usdt_sell_up, mocker
) -> None:
"""
amount
long: 60 / 2.0 = 30
short: 60 / 2.02 = 29.70297029
open_value
long: (30 * 2.0) + (30 * 2.0 * 0.0025) = 60.15
short: (29.702970297029704 * 2.02) - (29.702970297029704 * 2.02 * 0.0025) = 59.85
close_value
long: (30 * 2.2) - (30 * 2.2 * 0.0025) = 65.835
short: (29.702970297029704 * 2.3) + (29.702970297029704 * 2.3 * 0.0025) = 68.48762376237624
profit
long: 65.835 - 60.15 = 5.684999999999995
short: 59.85 - 68.48762376237624 = -8.637623762376244
profit_ratio
long: (65.835/60.15) - 1 = 0.0945137157107232
short: 1 - (68.48762376237624/59.85) = -0.1443211990371971
"""
open_rate = ticker_usdt.return_value['ask' if is_short else 'bid']
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=True),
get_funding_fees=MagicMock(side_effect=ExchangeError()),
)
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up,
_dry_is_price_crossed=MagicMock(return_value=False),
)
freqtrade.config['order_types']['exit'] = 'market'
freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.ROI)
)
assert log_has("Could not update funding fee.", caplog)
assert not trade.is_open
assert pytest.approx(trade.close_profit) == profit_ratio
assert rpc_mock.call_count == 4
last_msg = rpc_mock.call_args_list[-2][0][0]
assert {
'type': RPCMessageType.EXIT,
'trade_id': 1,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'gain': profit_or_loss,
'limit': limit,
'order_rate': limit,
'amount': pytest.approx(amount),
'order_type': 'market',
'buy_tag': None,
'enter_tag': None,
'open_rate': open_rate,
'current_rate': current_rate,
'profit_amount': pytest.approx(profit_amount),
'profit_ratio': profit_ratio,
'stake_currency': 'USDT',
'base_currency': 'ETH',
'fiat_currency': 'USD',
'sell_reason': ExitType.ROI.value,
'exit_reason': ExitType.ROI.value,
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_trade_exit_insufficient_funds_error(default_conf_usdt, ticker_usdt, fee, is_short,
ticker_usdt_sell_up, mocker) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mock_insuf = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_insufficient_funds')
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
create_order=MagicMock(side_effect=[
{'id': 1234553382},
InsufficientFundsError(),
]),
)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up
)
sell_reason = ExitCheckTuple(exit_type=ExitType.ROI)
assert not freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
exit_check=sell_reason
)
assert mock_insuf.call_count == 1
@pytest.mark.parametrize('profit_only,bid,ask,handle_first,handle_second,exit_type,is_short', [
# Enable profit
(True, 2.18, 2.2, False, True, ExitType.EXIT_SIGNAL.value, False),
(True, 2.18, 2.2, False, True, ExitType.EXIT_SIGNAL.value, True),
# Disable profit
(False, 3.19, 3.2, True, False, ExitType.EXIT_SIGNAL.value, False),
(False, 3.19, 3.2, True, False, ExitType.EXIT_SIGNAL.value, True),
# Enable loss
# * Shouldn't this be ExitType.STOP_LOSS.value
(True, 0.21, 0.22, False, False, None, False),
(True, 2.41, 2.42, False, False, None, True),
# Disable loss
(False, 0.10, 0.22, True, False, ExitType.EXIT_SIGNAL.value, False),
(False, 0.10, 0.22, True, False, ExitType.EXIT_SIGNAL.value, True),
])
def test_exit_profit_only(
default_conf_usdt, limit_order, limit_order_open, is_short,
fee, mocker, profit_only, bid, ask, handle_first, handle_second, exit_type) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
eside = entry_side(is_short)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': bid,
'ask': ask,
'last': bid
}),
create_order=MagicMock(side_effect=[
limit_order[eside],
{'id': 1234553382},
]),
get_fee=fee,
)
default_conf_usdt.update({
'use_exit_signal': True,
'exit_profit_only': profit_only,
'exit_profit_offset': 0.1,
})
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.custom_exit = MagicMock(return_value=None)
if exit_type == ExitType.EXIT_SIGNAL.value:
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
else:
freqtrade.strategy.ft_stoploss_reached = MagicMock(return_value=ExitCheckTuple(
exit_type=ExitType.NONE))
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]['symbol'], eside)
trade.update_order(limit_order[eside])
trade.update_trade(oobj)
freqtrade.wallets.update()
if profit_only:
assert freqtrade.handle_trade(trade) is False
# Custom-exit is called
assert freqtrade.strategy.custom_exit.call_count == 1
patch_get_signal(freqtrade, enter_long=False, exit_short=is_short, exit_long=not is_short)
assert freqtrade.handle_trade(trade) is handle_first
if handle_second:
freqtrade.strategy.exit_profit_offset = 0.0
assert freqtrade.handle_trade(trade) is True
def test_sell_not_enough_balance(default_conf_usdt, limit_order, limit_order_open,
fee, mocker, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 0.00002172,
'ask': 0.00002173,
'last': 0.00002172
}),
create_order=MagicMock(side_effect=[
limit_order_open['buy'],
{'id': 1234553382},
]),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
amnt = trade.amount
oobj = Order.parse_from_ccxt_object(limit_order['buy'], limit_order['buy']['symbol'], 'buy')
trade.update_trade(oobj)
patch_get_signal(freqtrade, enter_long=False, exit_long=True)
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=trade.amount * 0.985))
assert freqtrade.handle_trade(trade) is True
assert log_has_re(r'.*Falling back to wallet-amount.', caplog)
assert trade.amount != amnt
@pytest.mark.parametrize('amount_wallet,has_err', [
(95.29, False),
(91.29, True)
])
def test__safe_exit_amount(default_conf_usdt, fee, caplog, mocker, amount_wallet, has_err):
patch_RPCManager(mocker)
patch_exchange(mocker)
amount = 95.33
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=amount_wallet))
wallet_update = mocker.patch('freqtrade.wallets.Wallets.update')
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=fee.return_value,
fee_close=fee.return_value,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
if has_err:
with pytest.raises(DependencyException, match=r"Not enough amount to exit trade."):
assert freqtrade._safe_exit_amount(trade, trade.pair, trade.amount)
else:
wallet_update.reset_mock()
assert trade.amount != amount_wallet
assert freqtrade._safe_exit_amount(trade, trade.pair, trade.amount) == amount_wallet
assert log_has_re(r'.*Falling back to wallet-amount.', caplog)
assert trade.amount == amount_wallet
assert wallet_update.call_count == 1
caplog.clear()
wallet_update.reset_mock()
assert freqtrade._safe_exit_amount(trade, trade.pair, amount_wallet) == amount_wallet
assert not log_has_re(r'.*Falling back to wallet-amount.', caplog)
assert wallet_update.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_locked_pairs(default_conf_usdt, ticker_usdt, fee,
ticker_usdt_sell_down, mocker, caplog, is_short) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
# Decrease the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_down
)
freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_down()['ask' if is_short else 'bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS)
)
trade.close(ticker_usdt_sell_down()['bid'])
assert freqtrade.strategy.is_pair_locked(trade.pair, side='*')
# Both sides are locked
assert freqtrade.strategy.is_pair_locked(trade.pair, side='long')
assert freqtrade.strategy.is_pair_locked(trade.pair, side='short')
# reinit - should buy other pair.
caplog.clear()
freqtrade.enter_positions()
assert log_has_re(fr"Pair {trade.pair} \* is locked.*", caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limit_order_open, is_short,
fee, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
eside = entry_side(is_short)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 2.19,
'ask': 2.2,
'last': 2.19
}),
create_order=MagicMock(side_effect=[
limit_order_open[eside],
{'id': 1234553382},
]),
get_fee=fee,
)
default_conf_usdt['ignore_roi_if_entry_signal'] = True
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=True)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
oobj = Order.parse_from_ccxt_object(
limit_order[eside], limit_order[eside]['symbol'], eside)
trade.update_trade(oobj)
freqtrade.wallets.update()
if is_short:
patch_get_signal(freqtrade, enter_long=False, enter_short=True, exit_short=True)
else:
patch_get_signal(freqtrade, enter_long=True, exit_long=True)
assert freqtrade.handle_trade(trade) is False
# Test if entry-signal is absent (should sell due to roi = true)
if is_short:
patch_get_signal(freqtrade, enter_long=False, exit_short=False, exit_tag='something')
else:
patch_get_signal(freqtrade, enter_long=False, exit_long=False, exit_tag='something')
assert freqtrade.handle_trade(trade) is True
assert trade.exit_reason == ExitType.ROI.value
@pytest.mark.parametrize("is_short,val1,val2", [
(False, 1.5, 1.1),
(True, 0.5, 0.9)
])
def test_trailing_stop_loss(default_conf_usdt, limit_order_open,
is_short, val1, val2, fee, caplog, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 2.0,
'ask': 2.0,
'last': 2.0
}),
create_order=MagicMock(side_effect=[
limit_order_open[entry_side(is_short)],
{'id': 1234553382},
]),
get_fee=fee,
)
default_conf_usdt['trailing_stop'] = True
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
assert freqtrade.handle_trade(trade) is False
# Raise price into profits
mocker.patch(f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': 2.0 * val1,
'ask': 2.0 * val1,
'last': 2.0 * val1
}))
# Stoploss should be adjusted
assert freqtrade.handle_trade(trade) is False
caplog.clear()
# Price fell
mocker.patch(f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': 2.0 * val2,
'ask': 2.0 * val2,
'last': 2.0 * val2
}))
caplog.set_level(logging.DEBUG)
# Sell as trailing-stop is reached
assert freqtrade.handle_trade(trade) is True
stop_multi = 1.1 if is_short else 0.9
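# The expected stop sits 10% beyond the peak price: above it for shorts, below it for longs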
assert log_has(f"ETH/USDT - HIT STOP: current price at {(2.0 * val2):6f}, "
f"stoploss is {(2.0 * val1 * stop_multi):6f}, "
f"initial stoploss was at {(2.0 * stop_multi):6f}, trade opened at 2.000000",
caplog)
assert trade.exit_reason == ExitType.TRAILING_STOP_LOSS.value
@pytest.mark.parametrize('offset,trail_if_reached,second_sl,is_short', [
(0, False, 2.0394, False),
(0.011, False, 2.0394, False),
(0.055, True, 1.8, False),
(0, False, 2.1614, True),
(0.011, False, 2.1614, True),
(0.055, True, 2.42, True),
])
def test_trailing_stop_loss_positive(
default_conf_usdt, limit_order, limit_order_open,
offset, fee, caplog, mocker, trail_if_reached, second_sl, is_short
) -> None:
enter_price = limit_order[entry_side(is_short)]['price']
patch_RPCManager(mocker)
patch_exchange(mocker)
eside = entry_side(is_short)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': enter_price - (-0.01 if is_short else 0.01),
'ask': enter_price - (-0.01 if is_short else 0.01),
'last': enter_price - (-0.01 if is_short else 0.01),
}),
create_order=MagicMock(side_effect=[
limit_order[eside],
{'id': 1234553382},
]),
get_fee=fee,
)
default_conf_usdt['trailing_stop'] = True
default_conf_usdt['trailing_stop_positive'] = 0.01
if offset:
default_conf_usdt['trailing_stop_positive_offset'] = offset
default_conf_usdt['trailing_only_offset_is_reached'] = trail_if_reached
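# Trail with a 1% positive stoploss; with trailing_only_offset_is_reached, trailing starts only once the profit offset is hit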
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]['symbol'], eside)
trade.update_order(limit_order[eside])
trade.update_trade(oobj)
caplog.set_level(logging.DEBUG)
# stop-loss not reached
assert freqtrade.handle_trade(trade) is False
# Raise ticker_usdt above buy price
mocker.patch(
f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': enter_price + (-0.06 if is_short else 0.06),
'ask': enter_price + (-0.06 if is_short else 0.06),
'last': enter_price + (-0.06 if is_short else 0.06),
})
)
caplog.clear()
# stop-loss not reached, adjusted stoploss
assert freqtrade.handle_trade(trade) is False
caplog_text = (f"ETH/USDT - Using positive stoploss: 0.01 offset: {offset} profit: "
f"{'2.49' if not is_short else '2.24'}%")
if trail_if_reached:
assert not log_has(caplog_text, caplog)
assert not log_has("ETH/USDT - Adjusting stoploss...", caplog)
else:
assert log_has(caplog_text, caplog)
assert log_has("ETH/USDT - Adjusting stoploss...", caplog)
assert pytest.approx(trade.stop_loss) == second_sl
caplog.clear()
mocker.patch(
f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': enter_price + (-0.135 if is_short else 0.125),
'ask': enter_price + (-0.135 if is_short else 0.125),
'last': enter_price + (-0.135 if is_short else 0.125),
})
)
assert freqtrade.handle_trade(trade) is False
assert log_has(
f"ETH/USDT - Using positive stoploss: 0.01 offset: {offset} profit: "
f"{'5.72' if not is_short else '5.67'}%",
caplog
)
assert log_has("ETH/USDT - Adjusting stoploss...", caplog)
mocker.patch(
f'{EXMS}.fetch_ticker',
MagicMock(return_value={
'bid': enter_price + (-0.02 if is_short else 0.02),
'ask': enter_price + (-0.02 if is_short else 0.02),
'last': enter_price + (-0.02 if is_short else 0.02),
})
)
# Lower price again (but still positive)
assert freqtrade.handle_trade(trade) is True
assert log_has(
f"ETH/USDT - HIT STOP: current price at {enter_price + (-0.02 if is_short else 0.02):.6f}, "
f"stoploss is {trade.stop_loss:.6f}, "
f"initial stoploss was at {'2.42' if is_short else '1.80'}0000, "
f"trade opened at {2.2 if is_short else 2.0}00000",
caplog)
assert trade.exit_reason == ExitType.TRAILING_STOP_LOSS.value
@pytest.mark.parametrize("is_short", [False, True])
def test_disable_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limit_order_open,
is_short, fee, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
eside = entry_side(is_short)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 2.0,
'ask': 2.0,
'last': 2.0
}),
create_order=MagicMock(side_effect=[
limit_order_open[eside],
{'id': 1234553382},
{'id': 1234553383}
]),
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
default_conf_usdt['exit_pricing'] = {
'ignore_roi_if_entry_signal': False
}
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=True)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
oobj = Order.parse_from_ccxt_object(
limit_order[eside], limit_order[eside]['symbol'], eside)
trade.update_trade(oobj)
# Sell due to min_roi_reached
patch_get_signal(freqtrade, enter_long=not is_short, enter_short=is_short, exit_short=is_short)
assert freqtrade.handle_trade(trade) is True
# Test if entry-signal is absent
patch_get_signal(freqtrade)
assert freqtrade.handle_trade(trade) is True
assert trade.exit_reason == ExitType.ROI.value
def test_get_real_amount_quote(default_conf_usdt, trades_for_order, buy_order_fee, fee, caplog,
mocker):
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
amount = sum(x['amount'] for x in trades_for_order)
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=fee.return_value,
fee_close=fee.return_value,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
caplog.clear()
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
# Amount is reduced by "fee"
assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == (amount * 0.001)
assert log_has(
'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False,'
' leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.008.',
caplog
)
def test_get_real_amount_quote_dust(default_conf_usdt, trades_for_order, buy_order_fee, fee,
caplog, mocker):
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
walletmock = mocker.patch('freqtrade.wallets.Wallets.update')
mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=8.1122)
amount = sum(x['amount'] for x in trades_for_order)
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=fee.return_value,
fee_close=fee.return_value,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
walletmock.reset_mock()
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
# Amount is kept as is
assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None
assert walletmock.call_count == 1
assert log_has_re(r'Fee amount for Trade.* was in base currency '
'- Eating Fee 0.008 into dust', caplog)
def test_get_real_amount_no_trade(default_conf_usdt, buy_order_fee, caplog, mocker, fee):
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
amount = buy_order_fee['amount']
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=fee.return_value,
fee_close=fee.return_value,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
# No trades found for the order - fee cannot be applied, so the amount stays unchanged
assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None
assert log_has(
'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed) failed: '
'myTrade-Dict empty found',
caplog
)
@pytest.mark.parametrize(
'fee_par,fee_reduction_amount,use_ticker_usdt_rate,expected_log', [
# basic, amount does not change
({'cost': 0.008, 'currency': 'ETH'}, 0, False, None),
# no currency in fee
({'cost': 0.004, 'currency': None}, 0, True, None),
# BNB no rate
({'cost': 0.00094518, 'currency': 'BNB'}, 0, True, (
'Fee for Trade Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False, '
'leverage=1.0, open_rate=0.24544100, open_since=closed) [buy]: 0.00094518 BNB -'
' rate: None'
)),
# from order
({'cost': 0.004, 'currency': 'LTC'}, 0.004, False, (
'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.004.'
)),
# invalid, no currency in fee dict
({'cost': 0.008, 'currency': None}, 0, True, None),
])
def test_get_real_amount(
default_conf_usdt, trades_for_order, buy_order_fee, fee, mocker, caplog,
fee_par, fee_reduction_amount, use_ticker_usdt_rate, expected_log
):
buy_order = deepcopy(buy_order_fee)
buy_order['fee'] = fee_par
trades_for_order[0]['fee'] = fee_par
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
amount = sum(x['amount'] for x in trades_for_order)
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
fee_open=fee.return_value,
fee_close=fee.return_value,
open_rate=0.245441,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
if not use_ticker_usdt_rate:
mocker.patch(f'{EXMS}.fetch_ticker', side_effect=ExchangeError)
caplog.clear()
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
res = freqtrade.get_real_amount(trade, buy_order, order_obj)
if fee_reduction_amount == 0:
assert res is None
else:
assert res == fee_reduction_amount
if expected_log:
assert log_has(expected_log, caplog)
@pytest.mark.parametrize(
'fee_cost, fee_currency, fee_reduction_amount, expected_fee, expected_log_amount', [
# basic, amount is reduced by fee
(None, None, 0.001, 0.001, 7.992),
# different fee currency on both trades, fee is the average of both trades' fees
(0.02, 'BNB', 0.0005, 0.001518575, 7.996),
])
def test_get_real_amount_multi(
default_conf_usdt, trades_for_order2, buy_order_fee, caplog, fee, mocker, markets,
fee_cost, fee_currency, fee_reduction_amount, expected_fee, expected_log_amount,
):
trades_for_order = deepcopy(trades_for_order2)
if fee_cost:
trades_for_order[0]['fee']['cost'] = fee_cost
if fee_currency:
trades_for_order[0]['fee']['currency'] = fee_currency
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
amount = float(sum(x['amount'] for x in trades_for_order))
default_conf_usdt['stake_currency'] = "ETH"
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
fee_open=fee.return_value,
fee_close=fee.return_value,
open_rate=0.245441
)
# Fake markets entry to enable fee parsing
markets['BNB/ETH'] = markets['ETH/USDT']
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
mocker.patch(f'{EXMS}.fetch_ticker',
return_value={'ask': 0.19, 'last': 0.2})
# Amount is reduced by "fee"
expected_amount = amount * fee_reduction_amount
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == expected_amount
assert log_has(
(
'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), '
f'fee={expected_amount}.'
),
caplog
)
assert trade.fee_open == expected_fee
assert trade.fee_close == expected_fee
assert trade.fee_open_cost is not None
assert trade.fee_open_currency is not None
assert trade.fee_close_cost is None
assert trade.fee_close_currency is None
def test_get_real_amount_invalid_order(default_conf_usdt, trades_for_order, buy_order_fee, fee,
mocker):
limit_buy_order_usdt = deepcopy(buy_order_fee)
limit_buy_order_usdt['fee'] = {'cost': 0.004}
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
amount = float(sum(x['amount'] for x in trades_for_order))
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
fee_open=fee.return_value,
fee_close=fee.return_value,
open_rate=0.245441,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
# Amount does not change
assert freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj) is None
def test_get_real_amount_fees_order(default_conf_usdt, market_buy_order_usdt_doublefee,
fee, mocker):
tfo_mock = mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
mocker.patch(f'{EXMS}.get_valid_pair_combination', return_value='BNB/USDT')
mocker.patch(f'{EXMS}.fetch_ticker', return_value={'last': 200})
trade = Trade(
pair='LTC/USDT',
amount=30.0,
exchange='binance',
fee_open=fee.return_value,
fee_close=fee.return_value,
open_rate=0.245441,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
# Amount does not change
assert trade.fee_open == 0.0025
order_obj = Order.parse_from_ccxt_object(market_buy_order_usdt_doublefee, 'LTC/ETH', 'buy')
assert freqtrade.get_real_amount(trade, market_buy_order_usdt_doublefee, order_obj) is None
assert tfo_mock.call_count == 0
# Fetch fees from trades dict if available to get "proper" values
assert round(trade.fee_open, 4) == 0.001
def test_get_real_amount_wrong_amount(default_conf_usdt, trades_for_order, buy_order_fee, fee,
mocker):
limit_buy_order_usdt = deepcopy(buy_order_fee)
limit_buy_order_usdt['amount'] = limit_buy_order_usdt['amount'] - 0.001
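# Make the order amount differ from the trade amount to trigger the mismatch check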
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
amount = float(sum(x['amount'] for x in trades_for_order))
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=fee.return_value,
fee_close=fee.return_value,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
# Order amount does not match the sum of trades - expect an exception
with pytest.raises(DependencyException, match=r"Half bought\? Amounts don't match"):
freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)
def test_get_real_amount_wrong_amount_rounding(default_conf_usdt, trades_for_order, buy_order_fee,
fee, mocker):
# Floats should not be compared directly.
limit_buy_order_usdt = deepcopy(buy_order_fee)
trades_for_order[0]['amount'] = trades_for_order[0]['amount'] + 1e-15
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
amount = float(sum(x['amount'] for x in trades_for_order))
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
fee_open=fee.return_value,
fee_close=fee.return_value,
open_rate=0.245441,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
# Amount changes by fee amount.
assert pytest.approx(freqtrade.get_real_amount(
trade, limit_buy_order_usdt, order_obj)) == (amount * 0.001)
def test_get_real_amount_open_trade_usdt(default_conf_usdt, fee, mocker):
amount = 12345
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=fee.return_value,
fee_close=fee.return_value,
)
order = {
'id': 'mocked_order',
'amount': amount,
'status': 'open',
'side': 'buy',
'price': 0.245441,
}
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
order_obj = Order.parse_from_ccxt_object(order, 'LTC/ETH', 'buy')
assert freqtrade.get_real_amount(trade, order, order_obj) is None
def test_get_real_amount_in_point(default_conf_usdt, buy_order_fee, fee, mocker, caplog):
limit_buy_order_usdt = deepcopy(buy_order_fee)
# Fee amount in "POINT"
trades = [{
"info": {
},
"id": "some_trade_id",
"timestamp": 1660092505903,
"datetime": "2022-08-10T00:48:25.903Z",
"symbol": "CEL/USDT",
"order": "some_order_id",
"type": None,
"side": "sell",
"takerOrMaker": "taker",
"price": 1.83255,
"amount": 83.126,
"cost": 152.3325513,
"fee": {
"currency": "POINT",
"cost": 0.3046651026
},
"fees": [
{
"cost": "0",
"currency": "USDT"
},
{
"cost": "0",
"currency": "GT"
},
{
"cost": "0.3046651026",
"currency": "POINT"
}
]
}]
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades)
amount = float(sum(x['amount'] for x in trades))
trade = Trade(
pair='CEL/USDT',
amount=amount,
exchange='binance',
fee_open=fee.return_value,
fee_close=fee.return_value,
open_rate=0.245441
)
limit_buy_order_usdt['amount'] = amount
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
res = freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)
assert res is None
assert trade.fee_open_currency is None
assert trade.fee_open_cost is None
message = "Not updating buy-fee - rate: None, POINT."
assert log_has(message, caplog)
caplog.clear()
freqtrade.config['exchange']['unknown_fee_rate'] = 1
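# With an explicit unknown_fee_rate the POINT fee can now be accounted for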
res = freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)
assert res is None
assert trade.fee_open_currency == 'POINT'
assert pytest.approx(trade.fee_open_cost) == 0.3046651026
assert trade.fee_open == 0.002
assert trade.fee_open != fee.return_value
assert not log_has(message, caplog)
@pytest.mark.parametrize('amount,fee_abs,wallet,amount_exp', [
(8.0, 0.0, 10, None),
(8.0, 0.0, 0, None),
(8.0, 0.1, 0, 0.1),
(8.0, 0.1, 10, None),
(8.0, 0.1, 8.0, None),
(8.0, 0.1, 7.9, 0.1),
])
def test_apply_fee_conditional(default_conf_usdt, fee, mocker, caplog,
amount, fee_abs, wallet, amount_exp):
walletmock = mocker.patch('freqtrade.wallets.Wallets.update')
mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=wallet)
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=fee.return_value,
fee_close=fee.return_value,
)
order = Order(
ft_order_side='buy',
order_id='100',
ft_pair=trade.pair,
ft_is_open=True,
)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
walletmock.reset_mock()
# Fee is deducted from the amount only if the wallet cannot cover it
assert freqtrade.apply_fee_conditional(trade, 'LTC', amount, fee_abs, order) == amount_exp
assert walletmock.call_count == 1
if fee_abs != 0 and amount_exp is None:
assert log_has_re(r"Fee amount.*Eating.*dust\.", caplog)
@pytest.mark.parametrize('amount,fee_abs,wallet,amount_exp', [
(8.0, 0.0, 16, None),
(8.0, 0.0, 0, None),
(8.0, 0.1, 8, 0.1),
(8.0, 0.1, 20, None),
(8.0, 0.1, 16.0, None),
(8.0, 0.1, 7.9, 0.1),
(8.0, 0.1, 12, 0.1),
(8.0, 0.1, 15.9, 0.1),
])
def test_apply_fee_conditional_multibuy(default_conf_usdt, fee, mocker, caplog,
amount, fee_abs, wallet, amount_exp):
walletmock = mocker.patch('freqtrade.wallets.Wallets.update')
mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=wallet)
trade = Trade(
pair='LTC/ETH',
amount=amount,
exchange='binance',
open_rate=0.245441,
fee_open=fee.return_value,
fee_close=fee.return_value
)
# One closed order
order = Order(
ft_order_side='buy',
order_id='10',
ft_pair=trade.pair,
ft_is_open=False,
filled=amount,
status="closed"
)
trade.orders.append(order)
# Add additional order - this should NOT eat into dust unless the wallet was bigger already.
order1 = Order(
ft_order_side='buy',
order_id='100',
ft_pair=trade.pair,
ft_is_open=True,
)
trade.orders.append(order1)
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
walletmock.reset_mock()
# The new trade amount will be 2x amount - fee / wallet will have to be adapted to this.
assert freqtrade.apply_fee_conditional(trade, 'LTC', amount, fee_abs, order1) == amount_exp
assert walletmock.call_count == 1
if fee_abs != 0 and amount_exp is None:
assert log_has_re(r"Fee amount.*Eating.*dust\.", caplog)
@pytest.mark.parametrize("delta, is_high_delta", [
(0.1, False),
(100, True),
])
@pytest.mark.parametrize('is_short', [False, True])
def test_order_book_depth_of_market(
default_conf_usdt, ticker_usdt, limit_order_open,
fee, mocker, order_book_l2, delta, is_high_delta, is_short
):
ticker_side = 'ask' if is_short else 'bid'
default_conf_usdt['entry_pricing']['check_depth_of_market']['enabled'] = True
default_conf_usdt['entry_pricing']['check_depth_of_market']['bids_to_ask_delta'] = delta
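# A delta of 100 cannot be satisfied by the order book, so no trade should be opened in that case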
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch(f'{EXMS}.fetch_l2_order_book', order_book_l2)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_order_open[entry_side(is_short)]),
get_fee=fee,
)
# Save state of current whitelist
whitelist = deepcopy(default_conf_usdt['exchange']['pair_whitelist'])
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
if is_high_delta:
assert trade is None
else:
trade.is_short = is_short
assert trade is not None
assert pytest.approx(trade.stake_amount) == 60.0
assert trade.is_open
assert trade.open_date is not None
assert trade.exchange == 'binance'
assert len(Trade.session.scalars(select(Trade)).all()) == 1
# Simulate fulfilled LIMIT_BUY order for trade
oobj = Order.parse_from_ccxt_object(
limit_order_open[entry_side(is_short)], 'ADA/USDT', entry_side(is_short))
trade.update_trade(oobj)
assert trade.open_rate == ticker_usdt.return_value[ticker_side]
assert whitelist == default_conf_usdt['exchange']['pair_whitelist']
@pytest.mark.parametrize('exception_thrown,ask,last,order_book_top,order_book', [
(False, 0.045, 0.046, 2, None),
(True, 0.042, 0.046, 1, {'bids': [[]], 'asks': [[]]})
])
def test_order_book_entry_pricing1(mocker, default_conf_usdt, order_book_l2, exception_thrown,
ask, last, order_book_top, order_book, caplog) -> None:
"""
test if function get_rate will return the order book price instead of the ask rate
"""
patch_exchange(mocker)
ticker_usdt_mock = MagicMock(return_value={'ask': ask, 'last': last})
mocker.patch.multiple(
EXMS,
fetch_l2_order_book=MagicMock(return_value=order_book) if order_book else order_book_l2,
fetch_ticker=ticker_usdt_mock,
)
default_conf_usdt['exchange']['name'] = 'binance'
default_conf_usdt['entry_pricing']['use_order_book'] = True
default_conf_usdt['entry_pricing']['order_book_top'] = order_book_top
default_conf_usdt['entry_pricing']['price_last_balance'] = 0
default_conf_usdt['telegram']['enabled'] = False
freqtrade = FreqtradeBot(default_conf_usdt)
if exception_thrown:
with pytest.raises(PricingError):
freqtrade.exchange.get_rate('ETH/USDT', side="entry", is_short=False, refresh=True)
assert log_has_re(
r'ETH/USDT - Entry Price at location 1 from orderbook could not be determined.', caplog)
else:
assert freqtrade.exchange.get_rate(
'ETH/USDT', side="entry", is_short=False, refresh=True) == 0.043935
assert ticker_usdt_mock.call_count == 0
def test_check_depth_of_market(default_conf_usdt, mocker, order_book_l2) -> None:
"""
test check depth of market
"""
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_l2_order_book=order_book_l2
)
default_conf_usdt['telegram']['enabled'] = False
default_conf_usdt['exchange']['name'] = 'binance'
default_conf_usdt['entry_pricing']['check_depth_of_market']['enabled'] = True
# delta is 100, which is impossible to reach, hence the function returns False
default_conf_usdt['entry_pricing']['check_depth_of_market']['bids_to_ask_delta'] = 100
freqtrade = FreqtradeBot(default_conf_usdt)
conf = default_conf_usdt['entry_pricing']['check_depth_of_market']
assert freqtrade._check_depth_of_market('ETH/BTC', conf, side=SignalDirection.LONG) is False
@pytest.mark.parametrize('is_short', [False, True])
def test_order_book_exit_pricing(
default_conf_usdt, limit_buy_order_usdt_open, limit_buy_order_usdt, fee, is_short,
limit_sell_order_usdt_open, mocker, order_book_l2, caplog) -> None:
"""
test order book ask strategy
"""
mocker.patch(f'{EXMS}.fetch_l2_order_book', order_book_l2)
default_conf_usdt['exchange']['name'] = 'binance'
default_conf_usdt['exit_pricing']['use_order_book'] = True
default_conf_usdt['exit_pricing']['order_book_top'] = 1
default_conf_usdt['telegram']['enabled'] = False
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
limit_buy_order_usdt_open,
limit_sell_order_usdt_open,
]),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade
time.sleep(0.01) # Race condition fix
oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, limit_buy_order_usdt['symbol'], 'buy')
trade.update_trade(oobj)
freqtrade.wallets.update()
assert trade.is_open is True
if is_short:
patch_get_signal(freqtrade, enter_long=False, exit_short=True)
else:
patch_get_signal(freqtrade, enter_long=False, exit_long=True)
assert freqtrade.handle_trade(trade) is True
assert trade.close_rate_requested == order_book_l2.return_value['asks'][0][0]
mocker.patch(f'{EXMS}.fetch_l2_order_book', return_value={'bids': [[]], 'asks': [[]]})
with pytest.raises(PricingError):
freqtrade.handle_trade(trade)
assert log_has_re(
r"ETH/USDT - Exit Price at location 1 from orderbook could not be determined\..*",
caplog)
def test_startup_state(default_conf_usdt, mocker):
default_conf_usdt['pairlist'] = {'method': 'VolumePairList',
'config': {'number_assets': 20}
}
mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True))
worker = get_patched_worker(mocker, default_conf_usdt)
assert worker.freqtrade.state is State.RUNNING
def test_startup_trade_reinit(default_conf_usdt, edge_conf, mocker):
mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True))
reinit_mock = MagicMock()
mocker.patch('freqtrade.persistence.Trade.stoploss_reinitialization', reinit_mock)
ftbot = get_patched_freqtradebot(mocker, default_conf_usdt)
ftbot.startup()
assert reinit_mock.call_count == 1
reinit_mock.reset_mock()
ftbot = get_patched_freqtradebot(mocker, edge_conf)
ftbot.startup()
assert reinit_mock.call_count == 0
@pytest.mark.usefixtures("init_persistence")
def test_sync_wallet_dry_run(mocker, default_conf_usdt, ticker_usdt, fee, limit_buy_order_usdt_open,
caplog):
default_conf_usdt['dry_run'] = True
# Initialize to 2 times stake amount
default_conf_usdt['dry_run_wallet'] = 120.0
default_conf_usdt['max_open_trades'] = 2
default_conf_usdt['tradable_balance_ratio'] = 1.0
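# 120 USDT wallet split across 2 trades -> 60 USDT stake each; a third trade cannot be funded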
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_buy_order_usdt_open),
get_fee=fee,
)
bot = get_patched_freqtradebot(mocker, default_conf_usdt)
patch_get_signal(bot)
assert bot.wallets.get_free('USDT') == 120.0
n = bot.enter_positions()
assert n == 2
trades = Trade.session.scalars(select(Trade)).all()
assert len(trades) == 2
bot.config['max_open_trades'] = 3
n = bot.enter_positions()
assert n == 0
assert log_has_re(r"Unable to create trade for XRP/USDT: "
r"Available balance \(0.0 USDT\) is lower than stake amount \(60.0 USDT\)",
caplog)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short,buy_calls,sell_calls", [
(False, 1, 1),
(True, 1, 1),
])
def test_cancel_all_open_orders(mocker, default_conf_usdt, fee, limit_order, limit_order_open,
is_short, buy_calls, sell_calls):
default_conf_usdt['cancel_open_orders_on_exit'] = True
mocker.patch(
f'{EXMS}.fetch_order',
side_effect=[
ExchangeError(),
limit_order[exit_side(is_short)],
limit_order_open[entry_side(is_short)],
limit_order_open[exit_side(is_short)],
]
)
buy_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_enter')
sell_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit')
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades(fee, is_short=is_short)
trades = Trade.session.scalars(select(Trade)).all()
assert len(trades) == MOCK_TRADE_COUNT
freqtrade.cancel_all_open_orders()
assert buy_mock.call_count == buy_calls
assert sell_mock.call_count == sell_calls
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_check_for_open_trades(mocker, default_conf_usdt, fee, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.check_for_open_trades()
assert freqtrade.rpc.send_msg.call_count == 0
create_mock_trades(fee, is_short)
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
freqtrade.check_for_open_trades()
assert freqtrade.rpc.send_msg.call_count == 1
assert 'Handle these trades manually' in freqtrade.rpc.send_msg.call_args[0][0]['status']
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.usefixtures("init_persistence")
def test_startup_update_open_orders(mocker, default_conf_usdt, fee, caplog, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades(fee, is_short=is_short)
freqtrade.startup_update_open_orders()
assert not log_has_re(r"Error updating Order .*", caplog)
caplog.clear()
freqtrade.config['dry_run'] = False
freqtrade.startup_update_open_orders()
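# The mock trades leave 4 orders open before any of them are reconciled with the exchange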
assert len(Order.get_open_orders()) == 4
matching_buy_order = mock_order_4(is_short=is_short)
matching_buy_order.update({
'status': 'closed',
})
mocker.patch(f'{EXMS}.fetch_order', return_value=matching_buy_order)
freqtrade.startup_update_open_orders()
# Only stoploss and sell orders are kept open
assert len(Order.get_open_orders()) == 3
caplog.clear()
mocker.patch(f'{EXMS}.fetch_order', side_effect=ExchangeError)
freqtrade.startup_update_open_orders()
assert log_has_re(r"Error updating Order .*", caplog)
mocker.patch(f'{EXMS}.fetch_order', side_effect=InvalidOrderException)
hto_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_order')
# Orders which are no longer found after X days should be assumed as canceled.
freqtrade.startup_update_open_orders()
assert log_has_re(r"Order is older than \d days.*", caplog)
assert hto_mock.call_count == 3
assert hto_mock.call_args_list[0][0][0]['status'] == 'canceled'
assert hto_mock.call_args_list[1][0][0]['status'] == 'canceled'
@pytest.mark.usefixtures("init_persistence")
def test_startup_backpopulate_precision(mocker, default_conf_usdt, fee, caplog):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades_usdt(fee)
trades = Trade.get_trades().all()
trades[-1].exchange = 'some_other_exchange'
for trade in trades:
assert trade.price_precision is None
assert trade.amount_precision is None
assert trade.precision_mode is None
freqtrade.startup_backpopulate_precision()
trades = Trade.get_trades().all()
for trade in trades:
if trade.exchange == 'some_other_exchange':
assert trade.price_precision is None
assert trade.amount_precision is None
assert trade.precision_mode is None
else:
assert trade.price_precision is not None
assert trade.amount_precision is not None
assert trade.precision_mode is not None
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
def patch_with_fee(order):
order.update({'fee': {'cost': 0.1, 'rate': 0.01,
'currency': order['symbol'].split('/')[0]}})
return order
mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order',
side_effect=[ | patch_with_fee(mock_order_2_sell(is_short=is_short)), | 16 | 2023-11-07 18:46:03+00:00 | 8k |
awslabs/optimizing-multitask-training-through-dynamic-pipelines | tests/test_dataloader/test_dataloader.py | [
{
"identifier": "TransformerModelSpec",
"path": "dynapipe/model.py",
"snippet": "class TransformerModelSpec:\n # Default setting:\n # * mlp_hidden_size = 4x hidden_dim\n # * kv_channels = hidden_dim // num_attn_heads\n # * use FP16 mixed precision training with Adam optimizer.\n n_encoder_layers: int\n n_decoder_layers: int\n hidden_dim: int\n num_attn_heads: int\n mlp_hidden_dim: Union[None, int] = None\n kv_channels: Union[None, int] = None\n bytes_per_element: int = 2\n optimizer_state_multiplier: int = 12\n\n def __post_init__(self):\n if self.mlp_hidden_dim is None:\n # if not specified, use the 4x hidden dim as it is the norm\n self.mlp_hidden_dim = self.hidden_dim * 4\n if self.kv_channels is None:\n # if not specified, use the hidden_dim // num_attn_heads\n assert self.hidden_dim % self.num_attn_heads == 0\n self.kv_channels = self.hidden_dim // self.num_attn_heads\n\n def serialize(self) -> bytes:\n def _serialize_int(x: int):\n return x.to_bytes(4, \"little\")\n\n return b\"\".join(\n [\n _serialize_int(x)\n for x in [\n self.n_encoder_layers,\n self.n_decoder_layers,\n self.hidden_dim,\n self.num_attn_heads,\n self.mlp_hidden_dim,\n self.kv_channels,\n self.bytes_per_element,\n self.optimizer_state_multiplier,\n ]\n ]\n )\n\n @classmethod\n def deserialize(cls, data: bytes):\n def _deserialize_int(data: bytes):\n return int.from_bytes(data, \"little\")\n\n return cls(\n *[_deserialize_int(data[i * 4 : (i + 1) * 4]) for i in range(8)]\n )"
},
{
"identifier": "get_uniform_cluster",
"path": "dynapipe/model.py",
"snippet": "def get_uniform_cluster(n_devices, intra_node_bw=4800, inter_node_bw=100):\n device2node = {i: i for i in range(n_devices)}\n memory_limits = [1000000] * n_devices\n cluster = DynaPipeCluster(\n device2node, memory_limits, intra_node_bw, inter_node_bw, 0, 0\n )\n return cluster"
},
{
"identifier": "DynaPipeDataLoader",
"path": "dynapipe/pipe/data_loader.py",
"snippet": "class DynaPipeDataLoader:\n \"\"\"\n A wrapper around PyTorch DataLoader, which automatically generates\n execution plans for each batch and returns the execution plan along\n with the batch of data.\n\n On local rank 0 of each node, it starts a poller process which creates\n a Torch DataLoader wrapping the user provided dataset and prefetches data.\n Each worker in the Torch DataLoader is instructed to compute the execution\n plan for assigned batches and pushes the execution plan to a shared kv\n store. On the node where kv store is hosted, it is also responsible for kv\n store initialization.\n\n On all ranks, it creates a torch DataLoader wrapping the user dataset.\n In addition to the data, it also returns the execution plan for the batch,\n fetched from the shared kv store.\n \"\"\"\n\n def __init__(\n self,\n training_spec: TrainingSpec,\n dataset,\n pack_fn,\n constructor_fn,\n is_kv_host,\n node_rank=0,\n node_local_rank=0,\n node_size=1,\n dp_rank=0,\n pp_rank=0,\n virtual_pp_rank=0,\n collate_fn=torch.utils.data.default_collate,\n batch_size=1,\n shuffle=False,\n sampler=None,\n batch_sampler=None,\n num_workers=0,\n num_preprocess_workers=64,\n pin_memory=False,\n drop_last=False,\n timeout=0,\n *args,\n prefetch_factor=2,\n persistent_workers=False,\n encoder_key=\"text_enc\",\n decoder_key=\"text_dec\",\n ):\n self.node_rank = node_rank\n self.node_local_rank = node_local_rank\n self.dp_rank = dp_rank\n self.pp_rank = pp_rank\n assert pp_rank < training_spec.pipeline_parallel_size, (\n f\"pp_rank ({pp_rank}) should be smaller than \"\n f\"pipeline_parallel_size ({training_spec.pipeline_parallel_size})\"\n \"in training_spec.\"\n )\n # virtual rank is similar to virtual_pipeline_model_parallel_rank\n # in Megatron-LM, where multiple data loaders are created for\n # interleaved scheduling.\n self.virtual_pp_rank = virtual_pp_rank\n assert virtual_pp_rank < training_spec.n_chunks_per_device, (\n f\"virtual_pp_rank ({virtual_pp_rank}) should be smaller than \"\n f\"n_chunks_per_device ({training_spec.n_chunks_per_device})\"\n \"in training_spec, calculated from device assignment.\"\n )\n # create queues\n self.poller_control_queue = mp.Queue()\n self.num_preprocess_workers = num_preprocess_workers\n\n if self.node_local_rank == 0 and self.virtual_pp_rank == 0:\n dataloader_args = DataloaderArgs(\n dataset,\n batch_size,\n shuffle,\n sampler,\n batch_sampler,\n num_preprocess_workers,\n pack_fn,\n drop_last,\n prefetch_factor,\n persistent_workers,\n *args,\n )\n assigned_iters_per_node = num_preprocess_workers * prefetch_factor\n self.poller_process = mp.Process(\n target=_preprocessor_poller,\n args=(\n self.poller_control_queue,\n dataloader_args,\n training_spec,\n node_rank,\n node_size,\n is_kv_host,\n assigned_iters_per_node,\n encoder_key,\n decoder_key,\n ),\n )\n self.poller_process.start()\n # create torch dataloader\n worker_data = DataloaderWorkerData(\n dp_rank=dp_rank,\n pp_rank=pp_rank,\n virtual_pp_rank=virtual_pp_rank,\n )\n dataset.worker_data = worker_data\n self.data_loader = PTDataLoader(\n dataset,\n batch_size,\n shuffle,\n sampler,\n batch_sampler,\n num_workers,\n get_collate_fn(\n pack_fn,\n constructor_fn,\n collate_fn,\n encoder_key=encoder_key,\n decoder_key=decoder_key,\n ),\n pin_memory,\n drop_last,\n timeout,\n _worker_init_fn,\n *args,\n prefetch_factor=prefetch_factor,\n persistent_workers=persistent_workers,\n )\n\n def __del__(self):\n if hasattr(self, \"poller_process\"):\n if self.poller_process.is_alive():\n 
self.poller_control_queue.put(\"exit\")\n self.poller_process.join()\n\n def __iter__(self):\n yield from self.data_loader\n\n def __len__(self):\n return self.data_loader.__len__()\n\n def check_worker_number_rationality(self):\n if self.num_preprocess_workers == 0:\n logger.warn(\n \"DynaPipeDataLoader should be used with a large number of \"\n \"preprocessing workers to achieve good performance. \"\n \"Current setting is num_preprocess_workers=0.\"\n )\n self.data_loader.check_worker_number_rationality()"
},
{
"identifier": "TrainingSpec",
"path": "dynapipe/pipe/data_loader.py",
"snippet": "class TrainingSpec:\n cm_path: str\n cluster_spec: DynaPipeCluster\n model_spec: TransformerModelSpec\n data_parallel_size: int\n tensor_parallel_size: int\n pipeline_parallel_size: int\n zero_stage: int\n device_assignment: List[int]\n device_memory_limit: int\n partition_algo: str = \"dp\"\n token_based_partition_mbs: int = 0\n disable_tsp: bool = False\n schedule_method: str = \"dynamic\"\n disable_mb_permutation: bool = False\n disable_scheduler_memory_limit: bool = False\n enable_packing: bool = False\n per_mb_memory_fraction: float = -1.0\n round_seqlen_multiple: int = 8\n seqlen_offset: int = 0\n limit_rc_type: Optional[List[str]] = None\n model_type: str = \"gpt\"\n n_executors: int = field(init=False)\n n_layers_per_stage: int = field(init=False)\n n_chunks_per_device: int = field(init=False)\n\n def __post_init__(self):\n self.n_executors = max(self.device_assignment) + 1\n (\n _,\n _,\n self.n_layers_per_stage,\n self.n_chunks_per_device,\n ) = validate_device_assignment(\n self.model_spec, self.cluster_spec, self.device_assignment\n )"
},
{
"identifier": "ExecutionPlan",
"path": "dynapipe/pipe/instructions.py",
"snippet": "class ExecutionPlan:\n \"\"\"\n Sequences of PipeInstructions to be executed by the PipeEngine, which\n defines the buffer allocation, the shape of the tensors and the pipeline\n schedule.\n\n The sequences of instructions must be executed in the exact order they are\n defined in the plan. No synchronization should be performed between\n instructions to avoid deadlock.\n\n Args:\n stages (int): The number of pipeline stages.\n stage_id (int): The stage that will execute the generated schedule.\n \"\"\"\n\n def __init__(\n self,\n instructions: List[PipeInstruction],\n micro_batches: int,\n nranks: int,\n nstages: int,\n rank: int,\n assigned_stages: List[int],\n recompute_method: RecomputeMethod = RecomputeMethod.NONE,\n num_pipe_buffers: Optional[int] = 0,\n ):\n self.instructions = instructions\n self.micro_batches = micro_batches\n self.nranks = nranks\n self.nstages = nstages\n self.rank = rank\n self.assigned_stages = assigned_stages\n self.recompute_method = recompute_method\n self._valid_rank(rank)\n self.num_pipe_buffers = num_pipe_buffers\n\n def _valid_rank(self, rank):\n return 0 <= rank < self.nranks\n\n @property\n def num_micro_batches(self):\n \"\"\"The number of total micro_batches in this schedule.\"\"\"\n return self.micro_batches\n\n def __iter__(self):\n self.it = None\n return self\n\n def __next__(self):\n if self.it is None:\n self.it = self.steps()\n return next(self.it)\n\n def __repr__(self) -> str:\n return (\n \"ExecutionPlan(micro_batches={}, nranks={}, nstages={}, rank={}, \"\n \"assigned_stages={}, recompute_method={}, \"\n \"num_pipe_buffers={}, instructions={})\".format(\n self.micro_batches,\n self.nranks,\n self.nstages,\n self.rank,\n self.assigned_stages,\n _RECOMPUTE_METHOD_NAMES[self.recompute_method],\n self.num_pipe_buffers,\n self.instructions,\n )\n )\n\n def __str__(self):\n \"\"\"Print the execution plan in a human readable format.\"\"\"\n return (\n \"ExecutionPlan(micro_batches={}, nranks={}, nstages={}, rank={}, \"\n \"assigned_stages={}, recompute_method={}, \"\n \"num_pipe_buffers={}, instructions=[\\n\\t\".format(\n self.micro_batches,\n self.nranks,\n self.nstages,\n self.rank,\n self.assigned_stages,\n _RECOMPUTE_METHOD_NAMES[self.recompute_method],\n self.num_pipe_buffers,\n )\n + \"\\n\\t\".join([str(x) for x in self.instructions])\n + \"\\n])\"\n )\n\n def __eq__(self, other: \"ExecutionPlan\"):\n if not isinstance(other, ExecutionPlan):\n return False\n return (\n self.micro_batches == other.micro_batches\n and self.nranks == other.nranks\n and self.nstages == other.nstages\n and self.rank == other.rank\n and self.assigned_stages == other.assigned_stages\n and self.recompute_method == other.recompute_method\n and self.num_pipe_buffers == other.num_pipe_buffers\n and self.instructions == other.instructions\n )\n\n def serialize(self, config=SerializationConfig()) -> bytes:\n \"\"\"Serialize the execution plan to a byte array.\"\"\"\n\n def _serialize_plan_meta(x: int):\n return x.to_bytes(\n config.EXECUTION_PLAN_META_BYTES, config.BYTES_ENDIANNESS\n )\n\n return (\n _serialize_plan_meta(self.micro_batches)\n + _serialize_plan_meta(self.nranks)\n + _serialize_plan_meta(self.nstages)\n + _serialize_plan_meta(self.rank)\n + _serialize_plan_meta(len(self.assigned_stages))\n + b\"\".join([_serialize_plan_meta(x) for x in self.assigned_stages])\n + _serialize_plan_meta(self.recompute_method)\n + _serialize_plan_meta(self.num_pipe_buffers)\n + len(self.instructions).to_bytes(\n config.EXECUTION_PLAN_META_BYTES, 
config.BYTES_ENDIANNESS\n )\n + b\"\".join(\n [instr.serialize(config) for instr in self.instructions]\n )\n )\n\n @classmethod\n def deserialize(\n cls, bytes, config=SerializationConfig()\n ) -> \"ExecutionPlan\":\n \"\"\"Deserialize the execution plan from a byte array.\"\"\"\n\n def _deserialize_plan_meta(bytes):\n return (\n int.from_bytes(\n bytes[: config.EXECUTION_PLAN_META_BYTES],\n config.BYTES_ENDIANNESS,\n ),\n bytes[config.EXECUTION_PLAN_META_BYTES :],\n )\n\n micro_batches, bytes = _deserialize_plan_meta(bytes)\n nranks, bytes = _deserialize_plan_meta(bytes)\n nstages, bytes = _deserialize_plan_meta(bytes)\n rank, bytes = _deserialize_plan_meta(bytes)\n n_assigned_stages, bytes = _deserialize_plan_meta(bytes)\n assigned_stages = []\n for _ in range(n_assigned_stages):\n assigned_stage, bytes = _deserialize_plan_meta(bytes)\n assigned_stages.append(assigned_stage)\n recompute_method, bytes = _deserialize_plan_meta(bytes)\n num_pipe_buffers, bytes = _deserialize_plan_meta(bytes)\n n_instructions = int.from_bytes(\n bytes[: config.EXECUTION_PLAN_META_BYTES], config.BYTES_ENDIANNESS\n )\n bytes = bytes[config.EXECUTION_PLAN_META_BYTES :]\n instructions = []\n for _ in range(n_instructions):\n instr, bytes = PipeInstruction.deserialize(bytes, config=config)\n instructions.append(instr)\n assert len(bytes) == 0\n return cls(\n instructions,\n micro_batches,\n nranks,\n nstages,\n rank,\n assigned_stages,\n recompute_method,\n num_pipe_buffers,\n )"
},
{
"identifier": "ForwardPass",
"path": "dynapipe/pipe/instructions.py",
"snippet": "class ForwardPass(BufferInstruction):\n \"\"\"Execute the forward pass.\"\"\"\n\n pass"
}
] | import os
import pytest
import torch
import torch.distributed as dist
from torch.utils.data import Dataset
from dynapipe.model import TransformerModelSpec, get_uniform_cluster
from dynapipe.pipe.data_loader import DynaPipeDataLoader, TrainingSpec
from dynapipe.pipe.instructions import ExecutionPlan, ForwardPass | 4,474 | # torchrun --standalone --nnodes=1 --nproc_per_node=4 test_dataloader.py
# Others:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=2 test_dataloader.py
torch.manual_seed(42)
@pytest.fixture(scope="module", autouse=True)
def init_torch_distributed():
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
torch.distributed.init_process_group("gloo")
class DummyDataset(Dataset):
def __init__(self, size, inputs_only=False):
self.size = size
torch.manual_seed(42)
# pre-generate all data
self.enc_seqlen = []
self.dec_seqlen = []
self.data = []
for _ in range(size):
enc_seqlen, dec_seqlen = torch.randint(24, 512, (2,))
self.enc_seqlen.append(enc_seqlen)
if not inputs_only:
self.dec_seqlen.append(dec_seqlen)
result = {
"text_enc": list(
torch.randint(0, 100, (enc_seqlen,)).numpy()
),
"text_dec": list(
torch.randint(0, 100, (dec_seqlen,)).numpy()
),
}
else:
result = {
"text": list(torch.randint(0, 100, (enc_seqlen,)).numpy()),
}
self.data.append(result)
def __len__(self):
return self.size
def __getitem__(self, index):
return self.data[index]
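# Illustrative item layout: with inputs_only=False a sample is
#   {"text_enc": [tok, ...], "text_dec": [tok, ...]}
# with token ids drawn from [0, 100) and sequence lengths from [24, 512);
# with inputs_only=True only a single "text" key is produced.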
def dummy_pack_fn(tensors):
# (input, extra)
if len(tensors) == 0:
return [], 0
if isinstance(tensors[0], list):
concated_list = []
for t in tensors:
concated_list.extend(t)
return concated_list, 0
return torch.cat(tensors, dim=0), 0
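# A minimal sketch of the pack_fn contract used here: given per-sample sequences it
# returns (packed_sequence, extra), e.g.
#   dummy_pack_fn([[1, 2], [3]])                  -> ([1, 2, 3], 0)
#   dummy_pack_fn([torch.ones(2), torch.ones(3)]) -> (tensor of shape (5,), 0)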
def dummy_constructor_fn(
encoder_input,
encoder_extra,
decoder_input,
decoder_extra,
encoder_seqlen,
decoder_seqlen,
):
encoder_padding_len = encoder_seqlen - len(encoder_input)
if decoder_input is not None:
decoder_padding_len = decoder_seqlen - len(decoder_input)
encoder_input = torch.tensor(encoder_input, dtype=torch.long)
if decoder_input is not None:
decoder_input = torch.tensor(decoder_input, dtype=torch.long)
encoder_padded = torch.cat(
[
encoder_input,
torch.zeros(
encoder_padding_len,
dtype=encoder_input.dtype,
device=encoder_input.device,
),
],
dim=0,
)
if decoder_input is not None:
decoder_padded = torch.cat(
[
decoder_input,
torch.zeros(
decoder_padding_len,
dtype=decoder_input.dtype,
device=decoder_input.device,
),
],
dim=0,
)
return {
"text_enc": encoder_padded,
"text_dec": decoder_padded,
}
else:
return {
"text": encoder_padded,
}
def get_mb_shape_from_ep(ep: ExecutionPlan):
fw_shapes = []
for instr in ep.instructions:
if isinstance(instr, ForwardPass):
fw_shapes.append(instr.buffer_shapes)
return fw_shapes
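# Small usage sketch (raw_plan_bytes is a placeholder for plan bytes fetched from the
# shared kv store): the per-microbatch ForwardPass buffer shapes can be recovered via
#   ep = ExecutionPlan.deserialize(raw_plan_bytes)
#   shapes = get_mb_shape_from_ep(ep)  # one buffer_shapes entry per ForwardPass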
def test_joint_data_loader(inputs_only=False):
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Note: this test requires torch
# to run this test, exec:
# If running hanging tests or multi-node tests:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=4 test_dataloader.py
# Others:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=2 test_dataloader.py
torch.manual_seed(42)
@pytest.fixture(scope="module", autouse=True)
def init_torch_distributed():
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
torch.distributed.init_process_group("gloo")
class DummyDataset(Dataset):
def __init__(self, size, inputs_only=False):
self.size = size
torch.manual_seed(42)
# pre-generate all data
self.enc_seqlen = []
self.dec_seqlen = []
self.data = []
for _ in range(size):
enc_seqlen, dec_seqlen = torch.randint(24, 512, (2,))
self.enc_seqlen.append(enc_seqlen)
if not inputs_only:
self.dec_seqlen.append(dec_seqlen)
result = {
"text_enc": list(
torch.randint(0, 100, (enc_seqlen,)).numpy()
),
"text_dec": list(
torch.randint(0, 100, (dec_seqlen,)).numpy()
),
}
else:
result = {
"text": list(torch.randint(0, 100, (enc_seqlen,)).numpy()),
}
self.data.append(result)
def __len__(self):
return self.size
def __getitem__(self, index):
return self.data[index]
def dummy_pack_fn(tensors):
# (input, extra)
if len(tensors) == 0:
return [], 0
if isinstance(tensors[0], list):
concated_list = []
for t in tensors:
concated_list.extend(t)
return concated_list, 0
return torch.cat(tensors, dim=0), 0
def dummy_constructor_fn(
encoder_input,
encoder_extra,
decoder_input,
decoder_extra,
encoder_seqlen,
decoder_seqlen,
):
encoder_padding_len = encoder_seqlen - len(encoder_input)
if decoder_input is not None:
decoder_padding_len = decoder_seqlen - len(decoder_input)
encoder_input = torch.tensor(encoder_input, dtype=torch.long)
if decoder_input is not None:
decoder_input = torch.tensor(decoder_input, dtype=torch.long)
encoder_padded = torch.cat(
[
encoder_input,
torch.zeros(
encoder_padding_len,
dtype=encoder_input.dtype,
device=encoder_input.device,
),
],
dim=0,
)
if decoder_input is not None:
decoder_padded = torch.cat(
[
decoder_input,
torch.zeros(
decoder_padding_len,
dtype=decoder_input.dtype,
device=decoder_input.device,
),
],
dim=0,
)
return {
"text_enc": encoder_padded,
"text_dec": decoder_padded,
}
else:
return {
"text": encoder_padded,
}
def get_mb_shape_from_ep(ep: ExecutionPlan):
fw_shapes = []
for instr in ep.instructions:
if isinstance(instr, ForwardPass):
fw_shapes.append(instr.buffer_shapes)
return fw_shapes
def test_joint_data_loader(inputs_only=False): | cluster_spec = get_uniform_cluster(2) | 1 | 2023-11-08 07:58:20+00:00 | 8k |
apple/ml-reed | reed/models/reward_model.py | [
{
"identifier": "get_image_encoder",
"path": "reed/models/image_encoder.py",
"snippet": "def get_image_encoder(architecture: str, obs_dim: t.List[int], out_size: int = 1,\n hidden_dim: int = 128, hidden_depth: int = 3,\n image_hidden_num_channels: int = 32,\n *kwargs) -> nn.Module:\n \"\"\"\n Return the specified architecture initialized\n\n Args:\n architecture: which image encoder architecture to use\n obs_dim: dimensionality of the state images (height, width, channels)\n out_size: the size of the output\n hidden_dim: the size of the hidden layer(s)\n hidden_depth: the number of hidden layers\n image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the image encoder\n Returns:\n initialized image encoder\n \"\"\"\n if architecture == \"pixl2r\":\n # from PixL2R: https://arxiv.org/pdf/2007.15543.pdf & https://github.com/prasoongoyal/PixL2R/blob/b0691be6b27e705a62534b58f97ff7b8b6655c7d/src/supervised/model.py#L52\n return PixL2RImageEncoder(obs_dim=obs_dim, out_size=out_size,\n hidden_dim=hidden_dim, hidden_depth=hidden_depth,\n image_hidden_num_channels=image_hidden_num_channels)\n elif architecture == \"drqv2\":\n # from drqv2: https://github.com/facebookresearch/drqv2/blob/c0c650b76c6e5d22a7eb5f2edffd1440fe94f8ef/drqv2.py#L55\n return DRQv2ImageEncoder(obs_dim=obs_dim, out_size=out_size,\n hidden_dim=hidden_dim, hidden_depth=hidden_depth,\n image_hidden_num_channels=image_hidden_num_channels)\n else:\n raise NotImplementedError(f\"{architecture} is not an implemented image \"\n f\"encoder architecture\")"
},
{
"identifier": "StateActionSelfPredictiveRepresentationsNetworkEnsemble",
"path": "reed/models/self_predictive_representations_model.py",
"snippet": "class StateActionSelfPredictiveRepresentationsNetworkEnsemble(nn.Module):\n def __init__(self,\n device: torch.device,\n networks: t.Sequence[nn.Module]):\n \"\"\"\n Initial pass at an ensemble of networks used to train state-action representations that are consistent with\n the network's encoding of the state that results from applying the given action in the given state\n\n Args:\n device: which GPU or CPU device the network is to be run on\n networks: the networks that will make up the ensemble\n \"\"\"\n super(StateActionSelfPredictiveRepresentationsNetworkEnsemble, self).__init__()\n\n # convert the list of networks into a pytorch network list\n self._ensemble = nn.ModuleList(networks)\n\n # track the device\n self.device = device\n\n def __len__(self) -> int:\n \"\"\"\n The number of networks in the ensemble\n \"\"\"\n return len(self._ensemble)\n\n def __getitem__(self, item: int) -> nn.Module:\n return self._ensemble[item]\n\n def forward(self,\n transitions: t.List[EnvironmentContrastiveBatch]) -> t.Tuple[t.Sequence[torch.Tensor], t.Sequence[torch.Tensor]]:\n \"\"\"\n For each network, predict the representation of the next state and encode the given next state\n\n Args:\n transitions: a batch of environment transitions composed of states, actions, and next states for each\n network in the ensemble\n Returns:\n predicted embedding of the next state - p in the SimSiam paper\n next state embedding (detached from the tensor graph) - z in the SimSiam paper\n dimensionality: (batch, time step)\n \"\"\"\n next_state_preds = []\n projected_next_state_embeds = []\n for net_indx, net_batch in enumerate(transitions):\n net = self._ensemble[net_indx]\n # we need to convert the batch object a dictionary in case we are using nn.DataParallel\n next_state_pred, projected_next_state_embed = net(attr.asdict(net_batch))\n next_state_preds.append(next_state_pred)\n projected_next_state_embeds.append(projected_next_state_embed)\n\n # from the SimSiam paper, this is p and z\n return next_state_preds, projected_next_state_embeds\n\n def save(self, model_dir: Path, env_id: str, step: int):\n \"\"\"\n Save the ensemble to disk\n Args:\n model_dir: location to save the SFC nets\n env_id: the string identifier for the environment\n step: number of overall training steps taken before this save\n\n Returns:\n\n \"\"\"\n for net_indx, net in enumerate(self._ensemble):\n torch.save(net.state_dict(), f'{model_dir.as_posix()}/{env_id}_sfc_model_{step}_{net_indx}.pt')"
},
{
"identifier": "PreferenceTripletEnsembleDataLoader",
"path": "reed/data/preference_data_loader.py",
"snippet": "class PreferenceTripletEnsembleDataLoader:\n \"\"\"\n Handles loading and generating batches of preference triplets.\n\n The special logic needed is to handle different batch orderings for different networks in the reward ensemble\n \"\"\"\n def __init__(self, dataset: PreferenceDataset, ensemble_size: int,\n batch_size: int = 64, num_workers: int = 0, shuffle: bool = True, device: torch.device = \"cuda\"):\n \"\"\"\n Args:\n\n \"\"\"\n # create a data loader per ensemble network\n self.loader_ensemble = [DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers)\n for _ in range(ensemble_size)]\n\n self.device = device\n\n def _format_batch(self, batch: UNFORMATTED_PREFERENCE_TRIPLET_BATCH) -> FORMATTED_PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Format the preference batch so that the tensors are longs and on the correct device\n \"\"\"\n return [PreferenceTripletBatch(trajectories_one=member[0].float().to(self.device),\n trajectories_two=member[1].float().to(self.device),\n preference_labels=member[2].long().to(self.device))\n for member in batch]\n\n def dataset_length(self) -> int:\n return len(self.loader_ensemble[0].dataset)\n\n def __iter__(self) -> FORMATTED_PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Iterate through the preference triplet data loaders and return the batch per ensemble member\n\n Returns:\n list of PreferenceTripletBatch\n \"\"\"\n # set up each loader as an iterator\n iter_loader_ensemble = [iter(loader) for loader in self.loader_ensemble]\n # for each data loader grab the next batch until there are no more batches to grab\n while True:\n # check if there is a next batch to return\n try:\n yield self._format_batch([next(dataloader_iterator) for dataloader_iterator in iter_loader_ensemble])\n except StopIteration:\n break"
}
] | import typing as t
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from abc import abstractmethod
from collections import OrderedDict
from reed.models.image_encoder import get_image_encoder
from reed.models.self_predictive_representations_model import StateActionSelfPredictiveRepresentationsNetworkEnsemble
from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader | 4,613 | final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
action encoder
state-action encoder
prediction head
"""
# build the network that will encode the state features
self._state_encoder = nn.Sequential(OrderedDict([
('state_dense1', nn.Linear(self._in_size, self._state_embed_size)),
('state_leakyrelu1', nn.LeakyReLU(negative_slope=1e-2)),
('state_dropout1', nn.Dropout(self._dropout_prob))
]))
# build the network that will encode the action features
self._action_encoder = nn.Sequential(OrderedDict([
('action_dense1', nn.Linear(self._action_size, self._action_embed_size)),
('action_leakyrelu1', nn.LeakyReLU(negative_slope=1e-2)),
('action_dropout1', nn.Dropout(self._dropout_prob))
]))
# build the network that models the relationship between the state and action embeddings
state_action_encoder = []
for i in range(self._num_layers):
state_action_encoder.append((f'trunk_dense{i+1}', nn.Linear((self._hidden_size if i > 0 else self._state_embed_size + self._action_embed_size), self._hidden_size)))
state_action_encoder.append((f'trunk_leakyrelu{i+1}', nn.LeakyReLU(negative_slope=1e-2)))
state_action_encoder.append((f'trunk_dropout{i+1}', nn.Dropout(self._dropout_prob)))
self._state_action_encoder = nn.Sequential(OrderedDict(state_action_encoder))
# build the prediction head and select a final activation
self._prediction_head = nn.Linear(self._hidden_size, self._out_size)
if self._final_activation_type == 'tanh':
self._final_activation = nn.Tanh()
elif self._final_activation_type == 'sig':
self._final_activation = nn.Sigmoid()
else:
self._final_activation = nn.ReLU()
def _forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
# encode the state, the action, and the state-action pair
if len(states_action_pairs.size()) == 1:
states_embed = self._state_encoder(states_action_pairs[:self._in_size])
actions_embed = self._action_encoder(states_action_pairs[-self._action_size:])
elif len(states_action_pairs.size()) == 2:
states_embed = self._state_encoder(states_action_pairs[:, :self._in_size])
actions_embed = self._action_encoder(states_action_pairs[:, -self._action_size:])
elif len(states_action_pairs.size()) == 3:
states_embed = self._state_encoder(states_action_pairs[:, :, :self._in_size])
actions_embed = self._action_encoder(states_action_pairs[:, :, -self._action_size:])
else:
raise NotImplementedError()
state_action_embed = self._state_action_encoder(torch.cat([states_embed, actions_embed], dim=-1))
# predict the target values
prediction = self._final_activation(self._prediction_head(state_action_embed))
return prediction
class ImageStateActionNetwork(_BaseModel):
def __init__(self, obs_dim: t.List[int], out_size: int = 1,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False,
image_encoder_architecture: str = "pixl2r",
image_hidden_num_channels: int = 32,
*kwargs):
"""
Maps state-action pairs to some type of value where the state is an image
Args:
obs_dim: dimensionality of the state images (height, width, channels)
out_size: the size of the output
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: (default = tanh) the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
image_encoder_architecture: (default = "pixl2r") the architecture that is used for the image encoder
image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the image encoder
"""
self._image_encoder_architecture = image_encoder_architecture
assert image_encoder_architecture in {"pixl2r", "drqv2"}
self._image_hidden_num_channels = image_hidden_num_channels
super(ImageStateActionNetwork, self).__init__(
in_dim=obs_dim,
out_size=out_size,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
"""
# build the image encoder
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def _to_grayscale(img_obs: np.ndarray, batch_states: bool) -> np.ndarray:
"""
Convert the RGB image observations to grayscale
Args:
img_obs: the batch of image observations to convert to grayscale
batch_states: whether a batch of observations or a single observation is to be processed
Returns:
the grayscale batch os images
"""
if batch_states:
obs = img_obs.astype(float)
obs[:, :, :, 0] *= 0.1140
obs[:, :, :, 1] *= 0.587
obs[:, :, :, 2] *= 0.2989
return np.sum(obs, axis=-1, keepdims=True)
else:
obs = img_obs.astype(float)
obs[:, :, 0] *= 0.1140
obs[:, :, 1] *= 0.587
obs[:, :, 2] *= 0.2989
return np.sum(obs, axis=-1, keepdims=True)
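# Illustrative shapes: a batch of RGB frames (N, H, W, 3) with batch_states=True maps
# to (N, H, W, 1); a single frame (H, W, 3) with batch_states=False maps to (H, W, 1).
# Channel 0 is weighted 0.114 and channel 2 is weighted 0.2989, i.e. the input is
# effectively treated as BGR-ordered.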
class _BaseModel(nn.Module):
"""
A base reward model
"""
def __init__(self, in_dim: t.Union[t.List[int], int], out_size: int = 1,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False):
"""
A network to consume the state-based environment observations and actions
Args:
in_dim: dimensionality of the model's input
out_size: the size of the output
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: (default = tanh) the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
"""
super(_BaseModel, self).__init__()
# track the dimensionality of the input, the output, and the hidden dimensions
self._in_size = in_dim
self._out_size = out_size
self._hidden_size = hidden_dim
self._num_layers = hidden_depth
self._final_activation_type = final_activation
self._dropout_prob = dropout_probability
self._train_with_dropout = train_with_dropout
self._dropout_enabled = dropout_probability > 0
self._build()
@abstractmethod
def _build(self):
"""
Build the network
"""
pass
@abstractmethod
def _forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
pass
def _enable_dropout(self):
""" Function to enable the dropout layers, e.g. during test-time """
for m in self.modules():
# compare case-insensitively so that nn.Dropout (and similarly named) layers match
if 'dropout' in m.__class__.__name__.lower():
m.train()
self._dropout_enabled = True
def _disable_dropout(self):
""" Function to disable the dropout layers, e.g. during train time"""
for m in self.modules():
if 'dropout' in m.__class__.__name__:
m.eval()
self._dropout_enabled = False
def forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
if self.training and not self._train_with_dropout and self._dropout_prob > 0:
self._disable_dropout()
return self._forward(states_action_pairs)
def weight_decay_parameters(self) -> t.Tuple[t.Sequence, t.Sequence]:
"""
Sort the model parameters by whether weight decay can be applied to them
Returns:
with weight decay params
without weight decay params
"""
# need to track which weights will have L2 penalty (weight decay) applied and which won't
params_with_wd = []
params_without_wd = []
for m in self.modules():
# we get the nested Modules in their nested structure
# skip modules until we get to the leaf-node modules
if len(list(m.children())) > 0:
continue
if isinstance(m, nn.Linear):
params_with_wd.append(m.weight)
params_without_wd.append(m.bias)
else:
params_without_wd.extend(m.parameters())
return params_with_wd, params_without_wd
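# Minimal usage sketch (optimizer and hyperparameters below are illustrative, not from
# this repo): the split lets only Linear weights receive L2 regularization, e.g.
#   decay, no_decay = net.weight_decay_parameters()
#   optimizer = torch.optim.AdamW(
#       [{"params": decay, "weight_decay": 1e-4},
#        {"params": no_decay, "weight_decay": 0.0}],
#       lr=1e-3,
#   )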
def from_pretrained(self, state_dict: t.OrderedDict[str, torch.Tensor]):
"""
Load the given state dictionary to the model
Args:
state_dict: the state dictionary to load into memory
Returns:
"""
self.load_state_dict(state_dict)
def estimate_uncertainty(self, states_action_pairs: torch.Tensor, num_samples: int = 100) -> np.ndarray:
"""
Estimate model uncertainty over the given batch of data
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
num_samples: the number of forward passes with different dropout configurations to run to estimate
the uncertainty
Returns:
variance over predictions across the different dropout configurations
"""
with torch.no_grad():
# check how dropout started, because we want to leave it how we started
dropout_start_enabled = self._dropout_enabled
if not dropout_start_enabled:
self._enable_dropout()
# estimate the predicted values num_samples many times
repeat_estimates = []
for _ in range(num_samples):
estimate = self._forward(states_action_pairs).detach().cpu().numpy()
repeat_estimates.append(estimate)
if not dropout_start_enabled:
self._disable_dropout()
# combine the estimations
estimates = np.concatenate(repeat_estimates, axis=-1)
mean_estimation = np.mean(estimates, axis=-1, keepdims=True)
return np.mean(np.square(np.subtract(mean_estimation, estimates)), axis=-1)
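# Note: the value returned above is the per-example variance of the MC-dropout
# predictions (mean squared deviation from their mean over num_samples stochastic
# passes); it is only informative when dropout_probability > 0 at construction time.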
def forward_with_dropout(self, states_action_pairs: torch.Tensor, num_samples: int = 100) -> np.ndarray:
"""
Execute a forward pass of the given data with all but the dropout layers in eval mode
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
num_samples: the number of forward passes with different dropout configurations to run to estimate
the uncertainty
Returns:
dropout predictions across the different dropout configurations
"""
with torch.no_grad():
# check how dropout started, because we want to leave it how we started
dropout_start_enabled = self._dropout_enabled
if not dropout_start_enabled:
self._enable_dropout()
# estimate the predicted values num_samples many times
repeat_estimates = []
for _ in range(num_samples):
estimate = self._forward(states_action_pairs).detach().cpu().numpy()
repeat_estimates.append(estimate)
# combine the estimations
estimates = np.hstack(repeat_estimates)
if not dropout_start_enabled:
self._disable_dropout()
return estimates
def random_init_head(self):
"""
Randomly re-initialize the parameters of the final prediction layer
"""
self._prediction_head.reset_parameters()
class StateActionNetwork(_BaseModel):
def __init__(self, in_dim: int, out_size: int = 1,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False):
"""
A network to consume the state-based environment observations and actions
Args:
in_dim: dimensionality of the model's input
out_size: the size of the output
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: (default = tanh) the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
"""
super(StateActionNetwork, self).__init__(
in_dim=in_dim,
out_size=out_size,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
Build the 2 sub-networks that make up the model:
network body (state-action trunk)
prediction head
"""
# build the network that models the relationship between the state and action embeddings
network_body = []
for i in range(self._num_layers):
network_body.append((f'trunk_dense{i+1}', nn.Linear((self._hidden_size if i > 0 else self._in_size), self._hidden_size)))
network_body.append((f'trunk_leakyrelu{i+1}', nn.LeakyReLU(negative_slope=1e-2)))
network_body.append((f'trunk_dropout{i+1}', nn.Dropout(self._dropout_prob)))
self._network_body = nn.Sequential(OrderedDict(network_body))
# build the prediction head and select a final activation
self._prediction_head = nn.Linear(self._hidden_size, self._out_size)
if self._final_activation_type == 'tanh':
self._final_activation = nn.Tanh()
elif self._final_activation_type == 'sig':
self._final_activation = nn.Sigmoid()
else:
self._final_activation = nn.ReLU()
def _forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
state_action_embed = self._network_body(states_action_pairs)
# predict the target values
prediction = self._final_activation(self._prediction_head(state_action_embed))
return prediction
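# Minimal usage sketch (dimensions are illustrative): the state-based reward head
# scores concatenated state-action vectors, so in_dim must equal state_dim + action_dim.
#   net = StateActionNetwork(in_dim=17 + 6, out_size=1, hidden_dim=128, hidden_depth=3)
#   rewards = net(torch.randn(32, 17 + 6))  # -> shape (32, 1), in (-1, 1) for 'tanh'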
class StateActionFusionNetwork(_BaseModel):
def __init__(self, obs_dim: int, action_dim: int, out_size: int = 1,
obs_embed_dim: int = 64, action_embed_dim: int = 64,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False):
"""
Initial pass at a network used to train state-action representations that are consistent with
the network's encoding of the state that results from applying the given action in the given state
Args:
obs_dim: dimensionality of the states
action_dim: dimensionality of the actions
out_size: the size of the output
obs_embed_dim: the size of the state embedding
action_embed_dim: the size of the action embedding
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
"""
self._action_size = action_dim
self._state_embed_size = obs_embed_dim # int(self._hidden_size/2)
self._action_embed_size = action_embed_dim # int(self._hidden_size/2)
super(StateActionFusionNetwork, self).__init__(
in_dim=obs_dim,
out_size=out_size,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
action encoder
state-action encoder
prediction head
"""
# build the network that will encode the state features
self._state_encoder = nn.Sequential(OrderedDict([
('state_dense1', nn.Linear(self._in_size, self._state_embed_size)),
('state_leakyrelu1', nn.LeakyReLU(negative_slope=1e-2)),
('state_dropout1', nn.Dropout(self._dropout_prob))
]))
# build the network that will encode the action features
self._action_encoder = nn.Sequential(OrderedDict([
('action_dense1', nn.Linear(self._action_size, self._action_embed_size)),
('action_leakyrelu1', nn.LeakyReLU(negative_slope=1e-2)),
('action_dropout1', nn.Dropout(self._dropout_prob))
]))
# build the network that models the relationship between the state and action embeddings
state_action_encoder = []
for i in range(self._num_layers):
state_action_encoder.append((f'trunk_dense{i+1}', nn.Linear((self._hidden_size if i > 0 else self._state_embed_size + self._action_embed_size), self._hidden_size)))
state_action_encoder.append((f'trunk_leakyrelu{i+1}', nn.LeakyReLU(negative_slope=1e-2)))
state_action_encoder.append((f'trunk_dropout{i+1}', nn.Dropout(self._dropout_prob)))
self._state_action_encoder = nn.Sequential(OrderedDict(state_action_encoder))
# build the prediction head and select a final activation
self._prediction_head = nn.Linear(self._hidden_size, self._out_size)
if self._final_activation_type == 'tanh':
self._final_activation = nn.Tanh()
elif self._final_activation_type == 'sig':
self._final_activation = nn.Sigmoid()
else:
self._final_activation = nn.ReLU()
def _forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
# encode the state, the action, and the state-action pair
if len(states_action_pairs.size()) == 1:
states_embed = self._state_encoder(states_action_pairs[:self._in_size])
actions_embed = self._action_encoder(states_action_pairs[-self._action_size:])
elif len(states_action_pairs.size()) == 2:
states_embed = self._state_encoder(states_action_pairs[:, :self._in_size])
actions_embed = self._action_encoder(states_action_pairs[:, -self._action_size:])
elif len(states_action_pairs.size()) == 3:
states_embed = self._state_encoder(states_action_pairs[:, :, :self._in_size])
actions_embed = self._action_encoder(states_action_pairs[:, :, -self._action_size:])
else:
raise NotImplementedError()
state_action_embed = self._state_action_encoder(torch.cat([states_embed, actions_embed], dim=-1))
# predict the target values
prediction = self._final_activation(self._prediction_head(state_action_embed))
return prediction
class ImageStateActionNetwork(_BaseModel):
def __init__(self, obs_dim: t.List[int], out_size: int = 1,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False,
image_encoder_architecture: str = "pixl2r",
image_hidden_num_channels: int = 32,
*kwargs):
"""
Maps state-action pairs to some type of value where the state is an image
Args:
obs_dim: dimensionality of the state images (height, width, channels)
out_size: the size of the output
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: (default = tanh) the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
image_encoder_architecture: (default = "pixl2r") the architecture that is used for the image encoder
image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the image encoder
"""
self._image_encoder_architecture = image_encoder_architecture
assert image_encoder_architecture in {"pixl2r", "drqv2"}
self._image_hidden_num_channels = image_hidden_num_channels
super(ImageStateActionNetwork, self).__init__(
in_dim=obs_dim,
out_size=out_size,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
"""
# build the image encoder | self.convnet = get_image_encoder( | 0 | 2023-11-06 23:14:20+00:00 | 8k |
ApolloAuto/apollo-model-yolox | exps/example/yolox_voc/yolox_voc_l.py | [
{
"identifier": "get_yolox_datadir",
"path": "yolox/data/dataloading.py",
"snippet": "def get_yolox_datadir():\n \"\"\"\n get dataset dir of YOLOX. If environment variable named `YOLOX_DATADIR` is set,\n this function will return value of the environment variable. Otherwise, use data\n \"\"\"\n yolox_datadir = os.getenv(\"YOLOX_DATADIR\", None)\n if yolox_datadir is None:\n import yolox\n\n yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))\n yolox_datadir = os.path.join(yolox_path, \"datasets\")\n return yolox_datadir"
},
{
"identifier": "Exp",
"path": "yolox/exp/yolox_base.py",
"snippet": "class Exp(BaseExp):\n def __init__(self):\n super().__init__()\n\n # ---------------- model config ---------------- #\n # detect classes number of model\n self.num_classes = 8\n # factor of model depth\n self.depth = 1.00\n # factor of model width\n self.width = 1.00\n # activation name. For example, if using \"relu\", then \"silu\" will be replaced to \"relu\".\n self.act = \"silu\"\n\n # ---------------- dataloader config ---------------- #\n # set worker to 4 for shorter dataloader init time\n # If your training process cost many memory, reduce this value.\n self.data_num_workers = 4\n self.input_size = (640, 640) # (height, width)\n # Actual multiscale ranges: [640 - 5 * 32, 640 + 5 * 32].\n # To disable multiscale training, set the value to 0.\n self.multiscale_range = 5\n # You can uncomment this line to specify a multiscale range\n # self.random_size = (14, 26)\n # dir of dataset images, if data_dir is None, this project will use `datasets` dir\n self.data_dir = None\n # name of annotation file for training\n self.train_ann = \"instances_train2017.json\"\n # name of annotation file for evaluation\n self.val_ann = \"instances_val2017.json\"\n # name of annotation file for testing\n self.test_ann = \"instances_test2017.json\"\n\n # --------------- transform config ----------------- #\n # prob of applying mosaic aug\n self.mosaic_prob = 1.0\n # prob of applying mixup aug\n self.mixup_prob = 1.0\n # prob of applying hsv aug\n self.hsv_prob = 1.0\n # prob of applying flip aug\n self.flip_prob = 0.5\n # rotation angle range, for example, if set to 2, the true range is (-2, 2)\n self.degrees = 10.0\n # translate range, for example, if set to 0.1, the true range is (-0.1, 0.1)\n self.translate = 0.1\n self.mosaic_scale = (0.1, 2)\n # apply mixup aug or not\n self.enable_mixup = True\n self.mixup_scale = (0.5, 1.5)\n # shear angle range, for example, if set to 2, the true range is (-2, 2)\n self.shear = 2.0\n\n # -------------- training config --------------------- #\n # epoch number used for warmup\n self.warmup_epochs = 5\n # max training epoch\n self.max_epoch = 300\n # minimum learning rate during warmup\n self.warmup_lr = 0\n self.min_lr_ratio = 0.05\n # learning rate for one image. 
During training, lr will multiply batchsize.\n self.basic_lr_per_img = 0.01 / 64.0\n # name of LRScheduler\n self.scheduler = \"yoloxwarmcos\"\n # last #epoch to close augmention like mosaic\n self.no_aug_epochs = 15\n # apply EMA during training\n self.ema = True\n\n # weight decay of optimizer\n self.weight_decay = 5e-4\n # momentum of optimizer\n self.momentum = 0.9\n # log period in iter, for example,\n # if set to 1, user could see log every iteration.\n self.print_interval = 10\n # eval period in epoch, for example,\n # if set to 1, model will be evaluate after every epoch.\n self.eval_interval = 10\n # save history checkpoint or not.\n # If set to False, yolox will only save latest and best ckpt.\n self.save_history_ckpt = True\n # name of experiment\n self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n\n # ----------------- testing config ------------------ #\n # output image size during evaluation/test\n self.test_size = (640, 640)\n # confidence threshold during evaluation/test,\n # boxes whose scores are less than test_conf will be filtered\n self.test_conf = 0.01\n # nms threshold\n self.nmsthre = 0.65\n\n def get_model(self):\n from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead\n\n def init_yolo(M):\n for m in M.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eps = 1e-3\n m.momentum = 0.03\n\n if getattr(self, \"model\", None) is None:\n in_channels = [256, 512, 1024]\n backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels, act=self.act)\n head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, act=self.act)\n self.model = YOLOX(backbone, head)\n\n self.model.apply(init_yolo)\n self.model.head.initialize_biases(1e-2)\n self.model.train()\n return self.model\n\n def get_dataset(self, cache: bool = False, cache_type: str = \"ram\"):\n \"\"\"\n Get dataset according to cache and cache_type parameters.\n Args:\n cache (bool): Whether to cache imgs to ram or disk.\n cache_type (str, optional): Defaults to \"ram\".\n \"ram\" : Caching imgs to ram for fast training.\n \"disk\": Caching imgs to disk for fast training.\n \"\"\"\n from yolox.data import COCODataset, TrainTransform\n\n return COCODataset(\n data_dir=self.data_dir,\n json_file=self.train_ann,\n img_size=self.input_size,\n preproc=TrainTransform(\n max_labels=50,\n flip_prob=self.flip_prob,\n hsv_prob=self.hsv_prob\n ),\n cache=cache,\n cache_type=cache_type,\n )\n\n def get_data_loader(self, batch_size, is_distributed, no_aug=False, cache_img: str = None):\n \"\"\"\n Get dataloader according to cache_img parameter.\n Args:\n no_aug (bool, optional): Whether to turn off mosaic data enhancement. Defaults to False.\n cache_img (str, optional): cache_img is equivalent to cache_type. 
Defaults to None.\n \"ram\" : Caching imgs to ram for fast training.\n \"disk\": Caching imgs to disk for fast training.\n None: Do not use cache, in this case cache_data is also None.\n \"\"\"\n from yolox.data import (\n TrainTransform,\n YoloBatchSampler,\n DataLoader,\n InfiniteSampler,\n MosaicDetection,\n worker_init_reset_seed,\n )\n from yolox.utils import wait_for_the_master\n\n # if cache is True, we will create self.dataset before launch\n # else we will create self.dataset after launch\n if self.dataset is None:\n with wait_for_the_master():\n assert cache_img is None, \\\n \"cache_img must be None if you didn't create self.dataset before launch\"\n self.dataset = self.get_dataset(cache=False, cache_type=cache_img)\n \n self.dataset = MosaicDetection(\n dataset=self.dataset,\n mosaic=not no_aug,\n img_size=self.input_size,\n preproc=TrainTransform(\n max_labels=120,\n flip_prob=self.flip_prob,\n hsv_prob=self.hsv_prob),\n degrees=self.degrees,\n translate=self.translate,\n mosaic_scale=self.mosaic_scale,\n mixup_scale=self.mixup_scale,\n shear=self.shear,\n enable_mixup=self.enable_mixup,\n mosaic_prob=self.mosaic_prob,\n mixup_prob=self.mixup_prob,\n )\n\n if is_distributed:\n batch_size = batch_size // dist.get_world_size()\n\n sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)\n\n batch_sampler = YoloBatchSampler(\n sampler=sampler,\n batch_size=batch_size,\n drop_last=False,\n mosaic=not no_aug,\n )\n\n dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n\n # Make sure each process has different random seed, especially for 'fork' method.\n # Check https://github.com/pytorch/pytorch/issues/63311 for more details.\n dataloader_kwargs[\"worker_init_fn\"] = worker_init_reset_seed\n\n train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n return train_loader\n\n def random_resize(self, data_loader, epoch, rank, is_distributed):\n tensor = torch.LongTensor(2).cuda()\n\n if rank == 0:\n size_factor = self.input_size[1] * 1.0 / self.input_size[0]\n if not hasattr(self, 'random_size'):\n min_size = int(self.input_size[0] / 32) - self.multiscale_range\n max_size = int(self.input_size[0] / 32) + self.multiscale_range\n self.random_size = (min_size, max_size)\n size = random.randint(*self.random_size)\n size = (int(32 * size), 32 * int(size * size_factor))\n tensor[0] = size[0]\n tensor[1] = size[1]\n\n if is_distributed:\n dist.barrier()\n dist.broadcast(tensor, 0)\n\n input_size = (tensor[0].item(), tensor[1].item())\n return input_size\n\n def preprocess(self, inputs, targets, tsize):\n scale_y = tsize[0] / self.input_size[0]\n scale_x = tsize[1] / self.input_size[1]\n if scale_x != 1 or scale_y != 1:\n inputs = nn.functional.interpolate(\n inputs, size=tsize, mode=\"bilinear\", align_corners=False\n )\n targets[..., 1::2] = targets[..., 1::2] * scale_x\n targets[..., 2::2] = targets[..., 2::2] * scale_y\n return inputs, targets\n\n def get_optimizer(self, batch_size):\n if \"optimizer\" not in self.__dict__:\n if self.warmup_epochs > 0:\n lr = self.warmup_lr\n else:\n lr = self.basic_lr_per_img * batch_size\n\n pg0, pg1, pg2 = [], [], [] # optimizer parameter groups\n\n for k, v in self.model.named_modules():\n if hasattr(v, \"bias\") and isinstance(v.bias, nn.Parameter):\n pg2.append(v.bias) # biases\n if isinstance(v, nn.BatchNorm2d) or \"bn\" in k:\n pg0.append(v.weight) # no decay\n elif hasattr(v, \"weight\") and isinstance(v.weight, nn.Parameter):\n 
pg1.append(v.weight) # apply decay\n\n optimizer = torch.optim.SGD(\n pg0, lr=lr, momentum=self.momentum, nesterov=True\n )\n optimizer.add_param_group(\n {\"params\": pg1, \"weight_decay\": self.weight_decay}\n ) # add pg1 with weight_decay\n optimizer.add_param_group({\"params\": pg2})\n self.optimizer = optimizer\n\n return self.optimizer\n\n def get_lr_scheduler(self, lr, iters_per_epoch):\n from yolox.utils import LRScheduler\n\n scheduler = LRScheduler(\n self.scheduler,\n lr,\n iters_per_epoch,\n self.max_epoch,\n warmup_epochs=self.warmup_epochs,\n warmup_lr_start=self.warmup_lr,\n no_aug_epochs=self.no_aug_epochs,\n min_lr_ratio=self.min_lr_ratio,\n )\n return scheduler\n\n def get_eval_dataset(self, **kwargs):\n from yolox.data import COCODataset, ValTransform\n testdev = kwargs.get(\"testdev\", False)\n legacy = kwargs.get(\"legacy\", False)\n\n return COCODataset(\n data_dir=self.data_dir,\n json_file=self.val_ann if not testdev else self.test_ann,\n name=\"val2017\" if not testdev else \"test2017\",\n img_size=self.test_size,\n preproc=ValTransform(legacy=legacy),\n )\n\n def get_eval_loader(self, batch_size, is_distributed, **kwargs):\n valdataset = self.get_eval_dataset(**kwargs)\n\n if is_distributed:\n batch_size = batch_size // dist.get_world_size()\n sampler = torch.utils.data.distributed.DistributedSampler(\n valdataset, shuffle=False\n )\n else:\n sampler = torch.utils.data.SequentialSampler(valdataset)\n\n dataloader_kwargs = {\n \"num_workers\": self.data_num_workers,\n \"pin_memory\": True,\n \"sampler\": sampler,\n }\n dataloader_kwargs[\"batch_size\"] = batch_size\n val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n return val_loader\n\n def get_evaluator(self, batch_size, is_distributed, testdev=False, legacy=False):\n from yolox.evaluators import COCOEvaluator\n\n return COCOEvaluator(\n dataloader=self.get_eval_loader(batch_size, is_distributed,\n testdev=testdev, legacy=legacy),\n img_size=self.test_size,\n confthre=self.test_conf,\n nmsthre=self.nmsthre,\n num_classes=self.num_classes,\n testdev=testdev,\n )\n\n def get_trainer(self, args):\n from yolox.core import Trainer\n trainer = Trainer(self, args)\n # NOTE: trainer shouldn't be an attribute of exp object\n return trainer\n\n def eval(self, model, evaluator, is_distributed, half=False, return_outputs=False):\n return evaluator.evaluate(model, is_distributed, half, return_outputs=return_outputs)"
}
] | import os
from yolox.data import get_yolox_datadir
from yolox.exp import Exp as MyExp
from yolox.data import VOCDetection, TrainTransform
from yolox.data import VOCDetection, ValTransform
from yolox.evaluators import VOCEvaluator | 3,834 | # encoding: utf-8
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.num_classes = 8 # TODO: KITTI class is 6
self.depth = 1.0
self.width = 1.0
self.warmup_epochs = 1
# ---------- transform config ------------ #
self.mosaic_prob = 1.0
self.mixup_prob = 1.0
self.flip_prob = 0.5
self.hsv_prob = 1.0
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
def get_dataset(self, cache: bool, cache_type: str = "disk"):
return VOCDetection(
| # encoding: utf-8
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.num_classes = 8 # TODO: KITTI class is 6
self.depth = 1.0
self.width = 1.0
self.warmup_epochs = 1
# ---------- transform config ------------ #
self.mosaic_prob = 1.0
self.mixup_prob = 1.0
self.flip_prob = 0.5
self.hsv_prob = 1.0
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
def get_dataset(self, cache: bool, cache_type: str = "disk"):
return VOCDetection( | data_dir=os.path.join(get_yolox_datadir(), "CUSTOMER"), # TODO: CUSTOMER to KITTI | 0 | 2023-11-08 07:07:24+00:00 | 8k |
ndiamant/spice | spice/datasets.py | [
{
"identifier": "select_bins",
"path": "spice/conditional_histogram.py",
"snippet": "def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:\n return unique_quantile(y, n_bins, first_bin_zero=False)"
},
{
"identifier": "discretize",
"path": "spice/conditional_histogram.py",
"snippet": "def discretize(y: torch.Tensor, bins: torch.Tensor) -> torch.Tensor:\n return torch.bucketize(y.clip(max=bins[-1] - 1e-5), boundaries=bins)"
},
{
"identifier": "score_to_q_hat",
"path": "spice/utils.py",
"snippet": "def score_to_q_hat(score: torch.Tensor, alpha: float) -> float:\n n = score.shape[0]\n quantile = math.ceil((n + 1) * (1 - alpha)) / n\n q_hat = score.quantile(quantile).item()\n return q_hat"
}
] | import os
import torch
import torch.nn.functional as F
import numpy as np
import pandas as pd
from pathlib import Path
from torch.utils.data import TensorDataset, DataLoader
from pytorch_lightning import LightningDataModule
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from torch.distributions import Gamma
from spice.conditional_histogram import select_bins, discretize
from spice.utils import score_to_q_hat
from sklearn.impute import SimpleImputer | 4,841 | if 'blog_data' in name:
# https://github.com/xinbinhuang/feature-selection_blogfeedback
df = pd.read_csv(os.path.join(base_path, 'blogData_train.csv'), header=None)
X = df.iloc[:, 0:280].values
if name.endswith("_og"):
y = df.iloc[:, -1].values
else:
y = np.log(0.1 + df.iloc[:, -1].values)
if name == "concrete":
dataset = np.loadtxt(open(os.path.join(base_path, 'Concrete_Data.csv'), "rb"), delimiter=",", skiprows=1)
X = dataset[:, :-1]
y = dataset[:, -1:].squeeze()
if name == "bike":
# https://www.kaggle.com/rajmehra03/bike-sharing-demand-rmsle-0-3194
df = pd.read_csv(os.path.join(base_path, 'bike_train.csv'))
# Separate season into per-value indicator (one-hot) columns, because this enhances the features.
season = pd.get_dummies(df['season'], prefix='season')
df = pd.concat([df, season], axis=1)
# Do the same for weather, because this enhances the features.
weather = pd.get_dummies(df['weather'], prefix='weather')
df = pd.concat([df, weather], axis=1)
# Now we can drop the original weather and season columns.
df.drop(['season', 'weather'], inplace=True, axis=1)
df.head()
df["hour"] = [t.hour for t in pd.DatetimeIndex(df.datetime)]
df["day"] = [t.dayofweek for t in pd.DatetimeIndex(df.datetime)]
df["month"] = [t.month for t in pd.DatetimeIndex(df.datetime)]
df['year'] = [t.year for t in pd.DatetimeIndex(df.datetime)]
df['year'] = df['year'].map({2011: 0, 2012: 1})
df.drop('datetime', axis=1, inplace=True)
df.drop(['casual', 'registered'], axis=1, inplace=True)
df.columns.to_series().groupby(df.dtypes).groups
X = df.drop('count', axis=1).values
y = df['count'].values
if name == "community":
# https://github.com/vbordalo/Communities-Crime/blob/master/Crime_v1.ipynb
attrib = pd.read_csv(os.path.join(base_path, 'communities_attributes.csv'), delim_whitespace=True)
data = pd.read_csv(os.path.join(base_path, 'communities.data'), names=attrib['attributes'])
data = data.drop(columns=['state', 'county',
'community', 'communityname',
'fold'], axis=1)
data = data.replace('?', np.nan)
# Impute mean values for samples with missing values
imputer = SimpleImputer(strategy='mean')
imputer.fit(data[['OtherPerCap']])
data[['OtherPerCap']] = imputer.transform(data[['OtherPerCap']])
data = data.dropna(axis=1)
X = data.iloc[:, 0:100].values
y = data.iloc[:, 100].values
if name == "temperature":
df = pd.read_csv(os.path.join(base_path, "temperature.csv"))
df = df.drop(columns=['station', 'Date', 'Next_Tmax'])
df = df.dropna()
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
if name == "synth_het":
torch.manual_seed(5)
x = torch.linspace(0, 1, 2000)
noise = x * torch.rand_like(x) + 0.1
indicator = torch.randint_like(x, 2)
y = torch.where(indicator == 1, noise, -noise)
X = x.unsqueeze(1).numpy()
y = y.numpy()
X = X.astype(np.float32)
y = y.astype(np.float32)
return X, y
class RegressionData(LightningDataModule):
def __init__(
self, name: str, y_scaling: str = "min_max",
batch_size: int = 512, discretize_n_bins: int = None,
train_seed: int = 57771, smart_discretize: bool = True,
):
super().__init__()
x, y = get_dataset(name)
y = y.reshape(y.shape[0], 1)
np.random.seed(112123)
n = y.shape[0]
# train, val, calibrate, val calibration, test
dset_idx = np.random.choice(list(range(5)), p=[0.5, 0.1, 0.1, 0.1, 0.2], size=(n,))
test_idx = dset_idx == 4
# shuffle the train split based on the seed
np.random.seed(train_seed)
dset_idx[~test_idx] = np.random.permutation(dset_idx[~test_idx])
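# Note: the test assignment above stays fixed (seed 112123); only the non-test indices are re-permuted per train_seed, so train/val/cal splits vary across seeds while the test set does not.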
train_idx = dset_idx == 0
val_idx = dset_idx == 1
cal_idx = dset_idx == 2
cal_val_idx = dset_idx == 3
# scaling
y_scaler = {
"min_max": MinMaxScaler(feature_range=(0, 1 - 1e-5)),
"std": StandardScaler(),
}[y_scaling]
y_train = y[train_idx]
y_scaler.fit(y_train)
x_train = x[train_idx]
x_scaler = StandardScaler()
x_scaler.fit(x_train)
x = torch.tensor(x_scaler.transform(x), dtype=torch.float32)
y = torch.tensor(y_scaler.transform(y), dtype=torch.float32)
# discretize for histogram case
self.bins = None
if discretize_n_bins is not None:
transformed_train_y = torch.tensor(y_scaler.transform(y_train))
if smart_discretize:
|
DATASET_DIR = os.path.join(
Path(__file__).parent.parent, "datasets",
)
DATASET_NAMES = {
"star", "bio", "concrete", "bike", "community", "temperature",
"meps_19_og", "meps_20_og", "meps_21_og", "blog_data_og",
"synthetic_bimodal", "synth_het",
}
def add_gamma_studies():
for concentration in [6, 3, 1, 0.5, 0.1, 0.02]:
for negative in [False, True]:
neg_str = "neg" if negative else "pos"
DATASET_NAMES.add(f"synthetic_gamma_{concentration}_{neg_str}")
# add_gamma_studies()
def synthetic_bimodal() -> tuple[np.ndarray, np.ndarray]:
torch.manual_seed(5)
d = 8
n = 2000
x = torch.randn((n, d))
w = torch.randn((d, 1)) / d
w_switch = torch.randn((d, 1)) / d
switch = torch.sigmoid(x @ w_switch)
y = x @ w
y = y + torch.randn_like(y) / 5
y = torch.where(torch.rand((n, 1)) > switch, y + 1, y - 1)
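# Each target is shifted up or down by 1 with an input-dependent probability (switch), yielding a bimodal conditional distribution p(y|x).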
y /= y.abs().max() * 2
y += 0.5
return x.numpy(), y.squeeze().numpy()
@torch.no_grad()
def synthetic_gamma(concentration: float, negative: bool = False) -> tuple[np.ndarray, np.ndarray]:
torch.manual_seed(5)
d = 8
n = 2000
x = torch.randn((n, d))
w = torch.randn((d, 1)) / d
y = x @ w
gamma = Gamma(rate=1.0, concentration=concentration)
samples = gamma.rsample(y.shape)
samples /= samples.std()
y = (y - samples) if negative else (y + samples)
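# Adding the (positive, unit-std) gamma samples skews the noise to the right; subtracting them (negative=True) skews it to the left.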
return x.numpy(), y.squeeze().numpy()
def get_dataset(name: str, base_path: str = DATASET_DIR):
"""from https://github.com/yromano/cqr/tree/master/datasets"""
""" Load a dataset
Parameters
----------
name : string, dataset name
base_path : string, e.g. "path/to/datasets/directory/"
Returns
-------
X : features (nXp)
y : labels (n)
"""
assert name in DATASET_NAMES
if name == "synthetic_bimodal":
return synthetic_bimodal()
if "synthetic_gamma" in name:
concentration = float(name.split("_")[-2])
negative = name.endswith("_neg")
X, y = synthetic_gamma(concentration, negative)
if "meps_19" in name:
df = pd.read_csv(os.path.join(base_path, 'meps_19_reg.csv'))
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names != response_name]
column_names = column_names[column_names != "Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT15F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
if name.endswith("_og"):
y = df[response_name].values
else:
y = np.log(1 + df[response_name].values)
X = df[col_names].values
if "meps_20" in name:
df = pd.read_csv(os.path.join(base_path, 'meps_20_reg.csv'))
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names != response_name]
column_names = column_names[column_names != "Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT15F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
if name.endswith("_og"):
y = df[response_name].values
else:
y = np.log(1 + df[response_name].values)
X = df[col_names].values
if "meps_21" in name:
df = pd.read_csv(os.path.join(base_path, 'meps_21_reg.csv'))
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names != response_name]
column_names = column_names[column_names != "Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT16F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
if name.endswith("_og"):
y = df[response_name].values
else:
y = np.log(1 + df[response_name].values)
X = df[col_names].values
if name == "star":
df = pd.read_csv(os.path.join(base_path,'STAR.csv'))
df.loc[df['gender'] == 'female', 'gender'] = 0
df.loc[df['gender'] == 'male', 'gender'] = 1
df.loc[df['ethnicity'] == 'cauc', 'ethnicity'] = 0
df.loc[df['ethnicity'] == 'afam', 'ethnicity'] = 1
df.loc[df['ethnicity'] == 'asian', 'ethnicity'] = 2
df.loc[df['ethnicity'] == 'hispanic', 'ethnicity'] = 3
df.loc[df['ethnicity'] == 'amindian', 'ethnicity'] = 4
df.loc[df['ethnicity'] == 'other', 'ethnicity'] = 5
df.loc[df['stark'] == 'regular', 'stark'] = 0
df.loc[df['stark'] == 'small', 'stark'] = 1
df.loc[df['stark'] == 'regular+aide', 'stark'] = 2
df.loc[df['star1'] == 'regular', 'star1'] = 0
df.loc[df['star1'] == 'small', 'star1'] = 1
df.loc[df['star1'] == 'regular+aide', 'star1'] = 2
df.loc[df['star2'] == 'regular', 'star2'] = 0
df.loc[df['star2'] == 'small', 'star2'] = 1
df.loc[df['star2'] == 'regular+aide', 'star2'] = 2
df.loc[df['star3'] == 'regular', 'star3'] = 0
df.loc[df['star3'] == 'small', 'star3'] = 1
df.loc[df['star3'] == 'regular+aide', 'star3'] = 2
df.loc[df['lunchk'] == 'free', 'lunchk'] = 0
df.loc[df['lunchk'] == 'non-free', 'lunchk'] = 1
df.loc[df['lunch1'] == 'free', 'lunch1'] = 0
df.loc[df['lunch1'] == 'non-free', 'lunch1'] = 1
df.loc[df['lunch2'] == 'free', 'lunch2'] = 0
df.loc[df['lunch2'] == 'non-free', 'lunch2'] = 1
df.loc[df['lunch3'] == 'free', 'lunch3'] = 0
df.loc[df['lunch3'] == 'non-free', 'lunch3'] = 1
df.loc[df['schoolk'] == 'inner-city', 'schoolk'] = 0
df.loc[df['schoolk'] == 'suburban', 'schoolk'] = 1
df.loc[df['schoolk'] == 'rural', 'schoolk'] = 2
df.loc[df['schoolk'] == 'urban', 'schoolk'] = 3
df.loc[df['school1'] == 'inner-city', 'school1'] = 0
df.loc[df['school1'] == 'suburban', 'school1'] = 1
df.loc[df['school1'] == 'rural', 'school1'] = 2
df.loc[df['school1'] == 'urban', 'school1'] = 3
df.loc[df['school2'] == 'inner-city', 'school2'] = 0
df.loc[df['school2'] == 'suburban', 'school2'] = 1
df.loc[df['school2'] == 'rural', 'school2'] = 2
df.loc[df['school2'] == 'urban', 'school2'] = 3
df.loc[df['school3'] == 'inner-city', 'school3'] = 0
df.loc[df['school3'] == 'suburban', 'school3'] = 1
df.loc[df['school3'] == 'rural', 'school3'] = 2
df.loc[df['school3'] == 'urban', 'school3'] = 3
df.loc[df['degreek'] == 'bachelor', 'degreek'] = 0
df.loc[df['degreek'] == 'master', 'degreek'] = 1
df.loc[df['degreek'] == 'specialist', 'degreek'] = 2
df.loc[df['degreek'] == 'master+', 'degreek'] = 3
df.loc[df['degree1'] == 'bachelor', 'degree1'] = 0
df.loc[df['degree1'] == 'master', 'degree1'] = 1
df.loc[df['degree1'] == 'specialist', 'degree1'] = 2
df.loc[df['degree1'] == 'phd', 'degree1'] = 3
df.loc[df['degree2'] == 'bachelor', 'degree2'] = 0
df.loc[df['degree2'] == 'master', 'degree2'] = 1
df.loc[df['degree2'] == 'specialist', 'degree2'] = 2
df.loc[df['degree2'] == 'phd', 'degree2'] = 3
df.loc[df['degree3'] == 'bachelor', 'degree3'] = 0
df.loc[df['degree3'] == 'master', 'degree3'] = 1
df.loc[df['degree3'] == 'specialist', 'degree3'] = 2
df.loc[df['degree3'] == 'phd', 'degree3'] = 3
df.loc[df['ladderk'] == 'level1', 'ladderk'] = 0
df.loc[df['ladderk'] == 'level2', 'ladderk'] = 1
df.loc[df['ladderk'] == 'level3', 'ladderk'] = 2
df.loc[df['ladderk'] == 'apprentice', 'ladderk'] = 3
df.loc[df['ladderk'] == 'probation', 'ladderk'] = 4
df.loc[df['ladderk'] == 'pending', 'ladderk'] = 5
df.loc[df['ladderk'] == 'notladder', 'ladderk'] = 6
df.loc[df['ladder1'] == 'level1', 'ladder1'] = 0
df.loc[df['ladder1'] == 'level2', 'ladder1'] = 1
df.loc[df['ladder1'] == 'level3', 'ladder1'] = 2
df.loc[df['ladder1'] == 'apprentice', 'ladder1'] = 3
df.loc[df['ladder1'] == 'probation', 'ladder1'] = 4
df.loc[df['ladder1'] == 'noladder', 'ladder1'] = 5
df.loc[df['ladder1'] == 'notladder', 'ladder1'] = 6
df.loc[df['ladder2'] == 'level1', 'ladder2'] = 0
df.loc[df['ladder2'] == 'level2', 'ladder2'] = 1
df.loc[df['ladder2'] == 'level3', 'ladder2'] = 2
df.loc[df['ladder2'] == 'apprentice', 'ladder2'] = 3
df.loc[df['ladder2'] == 'probation', 'ladder2'] = 4
df.loc[df['ladder2'] == 'noladder', 'ladder2'] = 5
df.loc[df['ladder2'] == 'notladder', 'ladder2'] = 6
df.loc[df['ladder3'] == 'level1', 'ladder3'] = 0
df.loc[df['ladder3'] == 'level2', 'ladder3'] = 1
df.loc[df['ladder3'] == 'level3', 'ladder3'] = 2
df.loc[df['ladder3'] == 'apprentice', 'ladder3'] = 3
df.loc[df['ladder3'] == 'probation', 'ladder3'] = 4
df.loc[df['ladder3'] == 'noladder', 'ladder3'] = 5
df.loc[df['ladder3'] == 'notladder', 'ladder3'] = 6
df.loc[df['tethnicityk'] == 'cauc', 'tethnicityk'] = 0
df.loc[df['tethnicityk'] == 'afam', 'tethnicityk'] = 1
df.loc[df['tethnicity1'] == 'cauc', 'tethnicity1'] = 0
df.loc[df['tethnicity1'] == 'afam', 'tethnicity1'] = 1
df.loc[df['tethnicity2'] == 'cauc', 'tethnicity2'] = 0
df.loc[df['tethnicity2'] == 'afam', 'tethnicity2'] = 1
df.loc[df['tethnicity3'] == 'cauc', 'tethnicity3'] = 0
df.loc[df['tethnicity3'] == 'afam', 'tethnicity3'] = 1
df.loc[df['tethnicity3'] == 'asian', 'tethnicity3'] = 2
df = df.dropna()
grade = df["readk"] + df["read1"] + df["read2"] + df["read3"]
grade += df["mathk"] + df["math1"] + df["math2"] + df["math3"]
names = df.columns
target_names = names[8:16]
data_names = np.concatenate((names[0:8], names[17:]))
X = df.loc[:, data_names].values
y = grade.values
if name == "facebook_1":
df = pd.read_csv(os.path.join(base_path, 'facebook/Features_Variant_1.csv'))
y = df.iloc[:, 53].values
X = df.iloc[:, 0:53].values
if name == "facebook_2":
df = pd.read_csv(os.path.join(base_path, 'facebook/Features_Variant_2.csv'))
y = df.iloc[:, 53].values
X = df.iloc[:, 0:53].values
if name == "bio":
# https://github.com/joefavergel/TertiaryPhysicochemicalProperties/blob/master/RMSD-ProteinTertiaryStructures.ipynb
df = pd.read_csv(os.path.join(base_path, 'CASP.csv'))
y = df.iloc[:, 0].values
X = df.iloc[:, 1:].values
if 'blog_data' in name:
# https://github.com/xinbinhuang/feature-selection_blogfeedback
df = pd.read_csv(os.path.join(base_path, 'blogData_train.csv'), header=None)
X = df.iloc[:, 0:280].values
if name.endswith("_og"):
y = df.iloc[:, -1].values
else:
y = np.log(0.1 + df.iloc[:, -1].values)
if name == "concrete":
dataset = np.loadtxt(open(os.path.join(base_path, 'Concrete_Data.csv'), "rb"), delimiter=",", skiprows=1)
X = dataset[:, :-1]
y = dataset[:, -1:].squeeze()
if name == "bike":
# https://www.kaggle.com/rajmehra03/bike-sharing-demand-rmsle-0-3194
df = pd.read_csv(os.path.join(base_path, 'bike_train.csv'))
# Separate season into per-value indicator (one-hot) columns, because this enhances the features.
season = pd.get_dummies(df['season'], prefix='season')
df = pd.concat([df, season], axis=1)
# Do the same for weather, because this enhances the features.
weather = pd.get_dummies(df['weather'], prefix='weather')
df = pd.concat([df, weather], axis=1)
# Now we can drop the original weather and season columns.
df.drop(['season', 'weather'], inplace=True, axis=1)
df.head()
df["hour"] = [t.hour for t in pd.DatetimeIndex(df.datetime)]
df["day"] = [t.dayofweek for t in pd.DatetimeIndex(df.datetime)]
df["month"] = [t.month for t in pd.DatetimeIndex(df.datetime)]
df['year'] = [t.year for t in pd.DatetimeIndex(df.datetime)]
df['year'] = df['year'].map({2011: 0, 2012: 1})
df.drop('datetime', axis=1, inplace=True)
df.drop(['casual', 'registered'], axis=1, inplace=True)
df.columns.to_series().groupby(df.dtypes).groups
X = df.drop('count', axis=1).values
y = df['count'].values
if name == "community":
# https://github.com/vbordalo/Communities-Crime/blob/master/Crime_v1.ipynb
attrib = pd.read_csv(os.path.join(base_path, 'communities_attributes.csv'), delim_whitespace=True)
data = pd.read_csv(os.path.join(base_path, 'communities.data'), names=attrib['attributes'])
data = data.drop(columns=['state', 'county',
'community', 'communityname',
'fold'], axis=1)
data = data.replace('?', np.nan)
# Impute mean values for samples with missing values
imputer = SimpleImputer(strategy='mean')
imputer.fit(data[['OtherPerCap']])
data[['OtherPerCap']] = imputer.transform(data[['OtherPerCap']])
data = data.dropna(axis=1)
X = data.iloc[:, 0:100].values
y = data.iloc[:, 100].values
if name == "temperature":
df = pd.read_csv(os.path.join(base_path, "temperature.csv"))
df = df.drop(columns=['station', 'Date', 'Next_Tmax'])
df = df.dropna()
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
if name == "synth_het":
torch.manual_seed(5)
x = torch.linspace(0, 1, 2000)
noise = x * torch.rand_like(x) + 0.1
indicator = torch.randint_like(x, 2)
y = torch.where(indicator == 1, noise, -noise)
X = x.unsqueeze(1).numpy()
y = y.numpy()
X = X.astype(np.float32)
y = y.astype(np.float32)
return X, y
class RegressionData(LightningDataModule):
def __init__(
self, name: str, y_scaling: str = "min_max",
batch_size: int = 512, discretize_n_bins: int = None,
train_seed: int = 57771, smart_discretize: bool = True,
):
super().__init__()
x, y = get_dataset(name)
y = y.reshape(y.shape[0], 1)
np.random.seed(112123)
n = y.shape[0]
# train, val, calibrate, val calibration, test
dset_idx = np.random.choice(list(range(5)), p=[0.5, 0.1, 0.1, 0.1, 0.2], size=(n,))
test_idx = dset_idx == 4
# shuffle the train split based on the seed
np.random.seed(train_seed)
dset_idx[~test_idx] = np.random.permutation(dset_idx[~test_idx])
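# Note: the test assignment above stays fixed (seed 112123); only the non-test indices are re-permuted per train_seed, so train/val/cal splits vary across seeds while the test set does not.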
train_idx = dset_idx == 0
val_idx = dset_idx == 1
cal_idx = dset_idx == 2
cal_val_idx = dset_idx == 3
# scaling
y_scaler = {
"min_max": MinMaxScaler(feature_range=(0, 1 - 1e-5)),
"std": StandardScaler(),
}[y_scaling]
y_train = y[train_idx]
y_scaler.fit(y_train)
x_train = x[train_idx]
x_scaler = StandardScaler()
x_scaler.fit(x_train)
x = torch.tensor(x_scaler.transform(x), dtype=torch.float32)
y = torch.tensor(y_scaler.transform(y), dtype=torch.float32)
# discretize for histogram case
self.bins = None
if discretize_n_bins is not None:
transformed_train_y = torch.tensor(y_scaler.transform(y_train))
if smart_discretize: | self.bins = select_bins(transformed_train_y, discretize_n_bins) | 0 | 2023-11-01 18:04:29+00:00 | 8k |
nik-sm/com-hom-emg | com_hom_emg/model.py | [
{
"identifier": "EmbeddingNetwork",
"path": "com_hom_emg/basic_arch.py",
"snippet": "class EmbeddingNetwork(nn.Module):\n # TODO - design the structure of this model.\n # - consider taking ideas from transformer encoders or other domains.\n # - search for papers that extract useful features from EMG\n def __init__(\n self,\n input_channels: int,\n input_time_length: int,\n feature_dim: int,\n normalized_features: bool,\n use_preprocessed_data: bool = False,\n ):\n super().__init__()\n layers = [\n *ResBlock(input_channels, 64),\n *ResBlock(64),\n *ResBlock(64),\n *ResBlock(64),\n *ResBlock(64),\n *ResBlock(64),\n ]\n # NOTE - preprocessing includes 4x downsample. If no preprocessing, include 2 more blocks of 2x pooling:\n if not use_preprocessed_data:\n layers.extend([*ResBlock(64), *ResBlock(64)])\n\n layers.append(nn.Flatten())\n self.model = nn.Sequential(*layers)\n dim_after = self.model(torch.zeros(1, input_channels, input_time_length)).shape[-1]\n logger.info(f\"Dimension after convolution: {dim_after}\")\n self.model.append(nn.Linear(dim_after, feature_dim, bias=False))\n self.model.append(nn.BatchNorm1d(feature_dim))\n self.model.append(nn.ReLU(inplace=True))\n self.model.append(nn.Linear(feature_dim, feature_dim))\n if normalized_features:\n # self.model.append(nn.BatchNorm1d(feature_dim))\n self.model.append(UnitNormLayer())\n\n def forward(self, data):\n return self.model(data)"
},
{
"identifier": "UnitNormLayer",
"path": "com_hom_emg/basic_arch.py",
"snippet": "class UnitNormLayer(nn.Module):\n def forward(self, x):\n return torch.nn.functional.normalize(x, dim=-1)"
},
{
"identifier": "Conformer",
"path": "com_hom_emg/conformer.py",
"snippet": "class Conformer(nn.Sequential):\n def __init__(self, feature_dim: int, normalized_features: bool, emb_size=K, depth=6):\n layers = [\n Rearrange(\"batch channel time -> batch () channel time\"),\n PatchEmbedding(emb_size),\n TransformerEncoder(depth, emb_size),\n nn.Flatten(),\n nn.Linear(1400, feature_dim),\n ]\n if normalized_features:\n # layers.append(nn.BatchNorm1d(feature_dim))\n layers.append(UnitNormLayer())\n\n super().__init__(*layers)"
},
{
"identifier": "DataModule",
"path": "com_hom_emg/data.py",
"snippet": "class DataModule(LightningDataModule):\n @staticmethod\n def add_argparse_args(parent_parser):\n parser = parent_parser.add_argument_group(\"DataModule\")\n parser.add_argument(\"--fold\", type=int, required=True)\n parser.add_argument(\"--n_train_subj\", type=int, default=8)\n parser.add_argument(\"--n_val_subj\", type=int, default=1)\n parser.add_argument(\"--n_test_subj\", type=int, default=1)\n parser.add_argument(\"--batch_size\", type=int, default=128)\n parser.add_argument(\"--num_workers\", type=int, default=8)\n parser.add_argument(\"--use_preprocessed_data\", type=str2bool, default=False)\n return parent_parser\n\n def __init__(\n self,\n *,\n # seed and per_subj_data come from cli\n seed: int,\n per_subj_data: dict,\n #\n fold: int,\n n_train_subj: int,\n n_val_subj: int,\n n_test_subj: int,\n batch_size: int,\n num_workers: int,\n use_preprocessed_data: bool,\n **kw,\n ):\n \"\"\"\n From N subjects, we select 1 for val, 1 for test, and N-2 for train.\n In each set, data are merged and shuffled.\n While loading, we distinguish single and double gestures for easier splitting during train steps.\n \"\"\"\n super().__init__()\n self.train_set, self.val_set, self.test_set = get_datasets(\n per_subj_data, fold, n_train_subj, n_val_subj, n_test_subj, use_preprocessed_data\n )\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.seed = seed\n self.example_data_shape = self.train_set.tensors[0][0].shape\n\n def get_loader(self, dataset, shuffle: bool):\n return DataLoader(\n dataset,\n shuffle=shuffle,\n pin_memory=True,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n worker_init_fn=seed_worker,\n generator=torch.Generator().manual_seed(self.seed),\n persistent_workers=True,\n )\n\n def train_dataloader(self):\n return self.get_loader(self.train_set, shuffle=True)\n\n def val_dataloader(self):\n return self.get_loader(self.val_set, shuffle=False)\n\n def test_dataloader(self):\n return self.get_loader(self.test_set, shuffle=False)"
},
{
"identifier": "get_per_subj_data",
"path": "com_hom_emg/data.py",
"snippet": "def get_per_subj_data():\n path = PROJECT_PATH / \"data\" / \"combination-gesture-dataset\" / \"python\"\n per_subj_data = {}\n for subj_idx in range(10):\n per_subj_data[subj_idx] = {\n \"data\": np.load(path / f\"subj{subj_idx}/data.npy\"),\n \"labels\": np.load(path / f\"subj{subj_idx}/labels.npy\"),\n }\n return per_subj_data"
},
{
"identifier": "shuffle_together",
"path": "com_hom_emg/data.py",
"snippet": "def shuffle_together(*tensors):\n \"\"\"Shuffle tensors together\"\"\"\n assert all(isinstance(x, torch.Tensor) for x in tensors)\n assert all(len(x) == len(tensors[0]) for x in tensors)\n p = torch.randperm(len(tensors[0]))\n return [x[p] for x in tensors]"
},
{
"identifier": "TripletCentroids",
"path": "com_hom_emg/loss.py",
"snippet": "class TripletCentroids(nn.Module):\n \"\"\"\n Randomly initialize a centroid for each class.\n For each item, form triplets by comparing it to class centroids\n (there is exactly one positive centroid, and one randomly chosen negative centroid)\n Update centroids gradually using momentum.\n \"\"\"\n\n def __init__(self, margin, feature_dim: int, device: str, momentum=0.9):\n super().__init__()\n self.margin = margin\n self.momentum = momentum\n # TODO - init the centroids farther apart or closer together?\n #\n # https://math.stackexchange.com/questions/917292/expected-distance-between-two-vectors-that-belong-to-two-different-gaussian-dist # noqa\n # Expected distance between two independent gaussian vectors of dimension D is:\n # E[ || x - y || ^ 2 ] = || mu_x - mu_y || ^ 2 + tr(Cov_x + Cov_y)\n # torch.randn(n_items, n_features) * sigma has (approximately) mean = 0,\n # and spherical covariance = sigma**2 * torch.eye(n_features)\n # Expected distance between any pair of centroids will be:\n #\n # dist = 0 + trace(Cov_1 + Cov_2) = 2 * sigma**2 * n_features\n # dist = 0 + trace(2 * sigma**2 * n_features)\n # dist = 2 * sigma**2 * n_features\n self.keys = {torch.tensor([d, m], device=device, requires_grad=False) for (d, m) in product(range(4), range(4))}\n self.real_centroids = {k: torch.randn((feature_dim,), device=device, requires_grad=False) for k in self.keys}\n self.fake_centroids = {k: torch.randn((feature_dim,), device=device, requires_grad=False) for k in self.keys}\n\n def forward(\n self,\n real_double_features: torch.Tensor,\n fake_double_features: torch.Tensor,\n real_double_labels: torch.Tensor,\n fake_double_labels: torch.Tensor,\n ):\n assert len(real_double_features) == len(real_double_labels)\n assert len(fake_double_features) == len(fake_double_labels)\n assert len(real_double_features) > 0\n assert len(fake_double_features) > 0\n\n # Loop over real classes, computing triplet losses\n # In first iteration, anchor items all belong to c0.\n # Next iter, all anchors belong to c1, etc.\n # For each anchor item, just compute 1 triplet.\n anchors, positives, negatives = [], [], []\n for label in self.keys:\n anchor_idx = real_double_labels.eq(label).all(-1)\n _anchors = real_double_features[anchor_idx]\n if len(_anchors) == 0:\n continue\n\n # Use the matching centroid from fake items as positive\n positive_centroid = self.fake_centroids[label]\n # Sample 1 negative centroid (with replacement) for each anchor item\n negative_classes = list(self.keys - {label})\n negative_centroid_labels = random.choices(negative_classes, k=len(_anchors))\n for a, n in zip(_anchors, negative_centroid_labels):\n negative_centroid = self.fake_centroids[n]\n anchors.append(a)\n positives.append(positive_centroid)\n negatives.append(negative_centroid)\n\n # Loop over fake classes as anchors\n anchors, positives, negatives = [], [], []\n for label in self.keys:\n anchor_idx = fake_double_labels.eq(label).all(-1)\n _anchors = fake_double_features[anchor_idx]\n if len(_anchors) == 0:\n continue\n\n # Use the matching centroid from real items as positive\n positive_centroid = self.real_centroids[label]\n # Sample 1 negative centroid (with replacement) for each anchor item\n negative_classes = list(self.keys - {label})\n negative_centroid_labels = random.choices(negative_classes, k=len(_anchors))\n for a, n in zip(_anchors, negative_centroid_labels):\n negative_centroid = self.real_centroids[n]\n anchors.append(a)\n positives.append(positive_centroid)\n negatives.append(negative_centroid)\n\n 
if len(anchors) == 0:\n logger.warning(\"No triplets found\")\n loss = torch.tensor(0.0)\n else:\n anchors = torch.stack(anchors)\n positives = torch.stack(positives)\n negatives = torch.stack(negatives)\n\n # Compute loss\n loss = F.triplet_margin_loss(anchors, positives, negatives, margin=self.margin)\n\n # Update centroids with momentum\n # (update after computing loss; same order as in SGD with momentum)\n with torch.no_grad():\n for label, prev in self.real_centroids.items():\n match_idx = real_double_labels.eq(label).all(-1)\n if match_idx.sum() == 0:\n continue\n curr = real_double_features[match_idx].mean(0).detach()\n self.real_centroids[label] = self.momentum * prev + (1 - self.momentum) * curr\n\n for label, prev in self.fake_centroids.items():\n match_idx = fake_double_labels.eq(label).all(-1)\n if match_idx.sum() == 0:\n continue\n curr = fake_double_features[match_idx].mean(0).detach()\n self.fake_centroids[label] = self.momentum * prev + (1 - self.momentum) * curr\n\n return loss"
},
{
"identifier": "TripletLoss",
"path": "com_hom_emg/loss.py",
"snippet": "class TripletLoss(nn.Module):\n \"\"\"A random positive and a random negative item are used for a triplet\"\"\"\n\n def __init__(self, margin: float, triplets_per_item: int = 1):\n super().__init__()\n self.margin = margin\n self.triplets_per_item = triplets_per_item\n\n def forward(\n self,\n real_double_features: torch.Tensor,\n fake_double_features: torch.Tensor,\n real_double_labels: torch.Tensor,\n fake_double_labels: torch.Tensor,\n ):\n assert len(real_double_features) == len(real_double_labels)\n assert len(fake_double_features) == len(fake_double_labels)\n assert len(real_double_features) > 0\n assert len(fake_double_features) > 0\n\n embeddings = torch.cat([real_double_features, fake_double_features], dim=0)\n labels = torch.cat([real_double_labels, fake_double_labels], dim=0)\n device = embeddings.device\n is_real = torch.cat(\n [\n torch.ones(len(real_double_labels), device=device),\n torch.zeros(len(fake_double_labels), device=device),\n ],\n )\n pairwise_dist = torch.cdist(embeddings, embeddings)\n\n # Masks: for each row, which items are valid as a positive or negative item\n # Positive items: same label, opposite realness\n # Negative items: diff label, opposite realness\n positive_mask, negative_mask = get_masks(labels, is_real)\n positive_mask = positive_mask.int()\n negative_mask = negative_mask.int()\n\n # Subset to rows with at least K positive and at least K negative so we can form K triplets per row\n subset_idx = (positive_mask.sum(1) >= self.triplets_per_item) & (negative_mask.sum(1) >= self.triplets_per_item)\n if subset_idx.sum() == 0:\n logger.warning(f\"Not enough triplets per item (wanted: {self.triplets_per_item})\")\n return torch.tensor(0.0).to(embeddings.device)\n\n pairwise_dist = pairwise_dist[subset_idx, :]\n positive_mask = positive_mask[subset_idx, :]\n negative_mask = negative_mask[subset_idx, :]\n\n # The masks contain all \"0\" and \"1\" integers.\n # topk returns indices of first K \"1\" values in each row\n # Since batch contains shuffled items, the first K neighbors are random\n first_k_positive_idx = positive_mask.topk(self.triplets_per_item, dim=1, sorted=False).indices\n first_k_negative_idx = negative_mask.topk(self.triplets_per_item, dim=1, sorted=False).indices\n\n anchor_positive_dist = pairwise_dist.gather(1, first_k_positive_idx)\n anchor_negative_dist = pairwise_dist.gather(1, first_k_negative_idx)\n triplet_loss = F.relu(anchor_positive_dist - anchor_negative_dist + self.margin, inplace=True).mean()\n\n return triplet_loss"
},
{
"identifier": "TripletLossHardMining",
"path": "com_hom_emg/loss.py",
"snippet": "class TripletLossHardMining(nn.Module):\n \"\"\"The farthest positive and the closest negative item are used for a triplet\"\"\"\n\n # see:\n # https://omoindrot.github.io/triplet-loss\n # https://arxiv.org/abs/1703.07737\n # https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py\n def __init__(self, margin: float):\n super().__init__()\n self.margin = margin\n\n def forward(\n self,\n real_double_features: torch.Tensor,\n fake_double_features: torch.Tensor,\n real_double_labels: torch.Tensor,\n fake_double_labels: torch.Tensor,\n ):\n assert len(real_double_features) == len(real_double_labels)\n assert len(fake_double_features) == len(fake_double_labels)\n assert len(real_double_features) > 0\n assert len(fake_double_features) > 0\n\n embeddings = torch.cat([real_double_features, fake_double_features], dim=0)\n labels = torch.cat([real_double_labels, fake_double_labels], dim=0)\n device = embeddings.device\n is_real = torch.cat(\n [\n torch.ones(len(real_double_labels), device=device),\n torch.zeros(len(fake_double_labels), device=device),\n ],\n )\n pairwise_dist = torch.cdist(embeddings, embeddings)\n\n # Masks: for each row, which items are valid as a positive or negative item\n # Positive items: same label, opposite realness\n # Negative items: diff label, opposite realness\n positive_mask, negative_mask = get_masks(labels, is_real)\n positive_mask = positive_mask.float()\n negative_mask = negative_mask.float()\n\n # Subset to rows with at least 1 positive and at least 1 negative so we can form a triplet\n subset_idx = (positive_mask.sum(1) > 0) & (negative_mask.sum(1) > 0)\n if subset_idx.sum() == 0:\n return torch.tensor(0.0).to(embeddings.device)\n pairwise_dist = pairwise_dist[subset_idx, :]\n positive_mask = positive_mask[subset_idx, :]\n negative_mask = negative_mask[subset_idx, :]\n\n # Use mask to zero out any distances where (a, p) not valid.\n # (a, p) is valid if label(a) == label(p) and is_real(a) != is_real(p)\n # Thus when we select the largest dist, we'll select a valid positive\n anchor_positive_dist = positive_mask * pairwise_dist\n # shape (batch_size, 1)\n hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)\n\n # For each anchor, get the hardest negative\n # We add the maximum value in each row to the invalid negatives (label(a) == label(n))\n # Thus when we select the minimum dist, we'll select a valid negative\n max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)\n anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - negative_mask)\n\n # shape (batch_size,)\n hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)\n\n # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss\n triplet_loss = F.relu(hardest_positive_dist - hardest_negative_dist + self.margin, inplace=True).mean()\n return triplet_loss"
},
{
"identifier": "get_combo_conf_mat",
"path": "com_hom_emg/scoring.py",
"snippet": "def get_combo_conf_mat(y_true_2d, y_pred_2d, normalize=True):\n \"\"\"We get a confusion matrix of shape (25, 25). Row is true class, col is predicted.\n Entries are arranged like this:\n (D1, None), ..., (D4, None), (None, M1), ..., (None, M4), (D1, M1), ...\n (D1, M4), (D2, M1), ... (D2, M4), ... (D4, M4), (None, None)\n where D1 ... D4 are directions in order of appearance from DIRECTION_GESTURES\n and M1 ... M4 are modifiers in order of appearance from MODIFIER_GESTURES.\n This means the first 4 rows are each \"direction-only\" label, next 4 are \"modifier-only\" labels.\"\"\"\n cm = np.zeros((len(CANONICAL_COORDS), len(CANONICAL_COORDS)))\n for yt, yp in zip(y_true_2d, y_pred_2d):\n cm[CANONICAL_COORDS.index(tuple(yt)), CANONICAL_COORDS.index(tuple(yp))] += 1\n if normalize:\n # NOTE - result may contain nans - use nanmean later\n with np.errstate(all=\"ignore\"): # Ignore division by zero for empty rows\n cm /= cm.sum(axis=-1, keepdims=True)\n return cm"
},
{
"identifier": "PROJECT_PATH",
"path": "com_hom_emg/utils.py",
"snippet": "PROJECT_PATH = Path(__file__).parent.parent"
}
] | from copy import deepcopy
from itertools import chain, product
from pathlib import Path
from loguru import logger
from pytorch_lightning.loggers import TensorBoardLogger
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from torchmetrics.functional import accuracy
from vit_pytorch.simple_vit_1d import SimpleViT
from .basic_arch import EmbeddingNetwork, UnitNormLayer
from .conformer import Conformer
from .data import DataModule, get_per_subj_data, shuffle_together
from .loss import TripletCentroids, TripletLoss, TripletLossHardMining
from .scoring import get_combo_conf_mat
from .utils import PROJECT_PATH
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F | 6,523 | nn.Linear(feature_dim, feature_dim),
)
def forward(self, x1, x2, y1, y2):
y1 = F.one_hot(y1, num_classes=5)
y2 = F.one_hot(y2, num_classes=5)
avg = (x1 + x2) / 2
mlp_out = self.layer(torch.cat((x1, x2, y1, y2), dim=-1))
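# The MLP output is a learned correction added to the plain vector average (a residual-style combination).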
return avg + mlp_out
class CombinePairs(nn.Module):
def __init__(self, combine_fn: nn.Module, normalized_features: bool):
super().__init__()
self.normalized_features = normalized_features
self.combine_fn = combine_fn
def forward(self, x, y):
# Expects data and labels from single gestures
# Labels have the form (direction, modifier)
# where direction in 0, 1, 2, 3 is active, and 4 is NoDir
# same for modifier
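# e.g. label (2, 4) is a direction-only gesture and (4, 1) is a modifier-only gesture.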
device = x.device
dir_idx = y[:, 1] == 4 # When modifier is NoMod
mod_idx = y[:, 0] == 4 # When direction is NoDir
x_dir = x[dir_idx]
y_dir = y[dir_idx, 0]
x_mod = x[mod_idx]
y_mod = y[mod_idx, 1]
if len(x_dir) * len(x_mod) <= 1:
raise InsufficientDataError()
all_x1, all_x2, all_y1, all_y2 = [], [], [], []
for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):
all_x1.append(x1)
all_x2.append(x2)
all_y1.append(y1)
all_y2.append(y2)
all_x1 = torch.stack(all_x1)
all_x2 = torch.stack(all_x2)
all_y1 = torch.stack(all_y1).to(device)
all_y2 = torch.stack(all_y2).to(device)
x_aug = self.combine_fn(all_x1, all_x2, all_y1, all_y2)
y_aug = torch.stack((all_y1, all_y2), dim=-1)
if self.normalized_features:
x_aug = F.normalize(x_aug, dim=-1)
return x_aug, y_aug
def str2bool(s):
if s.lower() in ("yes", "true", "t", "y", "1"):
return True
elif s.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Boolean value expected.")
def get_noise(x, desired_SNR):
x_std = x.std()
# SNR = 10 * log10 ( (signal_power) / (noise_power) )
# where signal_power = data_std**2 and noise_power = noise_std**2,
# and SNR is passed as argparse param
noise_std = x_std / (10 ** (desired_SNR / 20))
return torch.randn_like(x) * noise_std
class LearnedEmbedding(pl.LightningModule):
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("LearnedEmbedding")
parser.add_argument("--encoder_arch", choices=["basic", "conformer", "vit", "identity"], default="basic")
parser.add_argument("--clf_arch", choices=["small", "large"], default="small")
parser.add_argument("--feature_dim", type=int, default=64)
# Note that with normalized features, we might need to re-normalize after making combinations
parser.add_argument("--data_noise_SNR", type=float, default=None, help="Desired SNR in dB. None for no noise.")
parser.add_argument(
"--feature_noise_SNR", type=float, default=None, help="Desired SNR in dB. None for no noise."
)
parser.add_argument("--normalized_features", type=str2bool, default=False)
parser.add_argument("--feature_combine_type", choices=["avg", "mlp"], default="avg")
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--lr_decay", type=float, default=1.0)
parser.add_argument("--linearity_loss_coeff", type=float, default=1.0)
parser.add_argument("--real_CE_loss_coeff", type=float, default=1.0)
parser.add_argument("--fake_CE_loss_coeff", type=float, default=1.0)
parser.add_argument("--loss_type", choices=["triplet", "triplet-centroids", "triplet-hard"], default="triplet")
parser.add_argument("--margin", type=float, default=1.0)
parser.add_argument("--centroids_momentum", type=float, default=0.75, help="For `triplet-centroids` loss")
parser.add_argument("--triplets_per_item", type=int, default=1, help="For `triplet` loss")
parser = parent_parser.add_argument_group("LearnedEmbedding - Fine-tuning")
parser.add_argument("--finetune_steps", type=int, default=10_000)
parser.add_argument("--finetune_lr", type=float, default=3e-5)
parser.add_argument("--finetune_lr_decay", type=float, default=1.0)
parser.add_argument("--finetune_batch_size", type=float, default=32)
parser.add_argument("--finetune_test_frac", type=float, default=0.2)
parser.add_argument("--finetune_n_aug_per_class", type=int, default=-1, help="-1 for all, positive for N")
return parent_parser
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters() # Access arg from command line "--arg1" at "self.hparams.arg1", etc
# NOTE - self.example_input_array - magic pytorch lightning variable for tboard log_graph
self.example_input_array = torch.ones(1, self.hparams.input_channels, self.hparams.input_time_length)
if self.hparams.encoder_arch == "basic":
self.embedding = EmbeddingNetwork(
input_channels=self.hparams.input_channels,
input_time_length=self.hparams.input_time_length,
feature_dim=self.hparams.feature_dim,
normalized_features=self.hparams.normalized_features,
use_preprocessed_data=self.hparams.use_preprocessed_data,
)
elif self.hparams.encoder_arch == "conformer":
|
class InsufficientDataError(Exception):
...
class DummyIdentity(nn.Module):
# A null embedding. Has a single (unused) parameter to easily use in the same pl training loop
def __init__(self):
super().__init__()
self.param = nn.Parameter(torch.tensor(0.0))
def forward(self, x):
return x.flatten(1)
class MLPClf(nn.Sequential):
def __init__(self, input_dim, output_dim):
layers = [
nn.Linear(input_dim, input_dim * 2, bias=False),
nn.BatchNorm1d(input_dim * 2),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(input_dim * 2, input_dim, bias=False),
nn.BatchNorm1d(input_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(input_dim, output_dim),
]
super().__init__(*layers)
class Avg(nn.Module):
def forward(self, x1, x2, _y1, _y2):
# Note that vector average is elementwise; thus we don't care
# if we have a pair of single vectors or a pair of batches
return (x1 + x2) / 2
class MLPCombine(nn.Module):
def __init__(self, feature_dim):
super().__init__()
self.layer = nn.Sequential(
# Input takes 2 feature vectors, and 2 labels (each one-hot with 5 classes)
nn.Linear(feature_dim * 2 + 5 * 2, feature_dim, bias=False),
nn.BatchNorm1d(feature_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(feature_dim, feature_dim, bias=False),
nn.BatchNorm1d(feature_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(feature_dim, feature_dim),
)
def forward(self, x1, x2, y1, y2):
y1 = F.one_hot(y1, num_classes=5)
y2 = F.one_hot(y2, num_classes=5)
avg = (x1 + x2) / 2
mlp_out = self.layer(torch.cat((x1, x2, y1, y2), dim=-1))
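# The MLP output is a learned correction added to the plain vector average (a residual-style combination).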
return avg + mlp_out
class CombinePairs(nn.Module):
def __init__(self, combine_fn: nn.Module, normalized_features: bool):
super().__init__()
self.normalized_features = normalized_features
self.combine_fn = combine_fn
def forward(self, x, y):
# Expects data and labels from single gestures
# Labels have the form (direction, modifier)
# where direction in 0, 1, 2, 3 is active, and 4 is NoDir
# same for modifier
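# e.g. label (2, 4) is a direction-only gesture and (4, 1) is a modifier-only gesture.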
device = x.device
dir_idx = y[:, 1] == 4 # When modifier is NoMod
mod_idx = y[:, 0] == 4 # When direction is NoDir
x_dir = x[dir_idx]
y_dir = y[dir_idx, 0]
x_mod = x[mod_idx]
y_mod = y[mod_idx, 1]
if len(x_dir) * len(x_mod) <= 1:
raise InsufficientDataError()
all_x1, all_x2, all_y1, all_y2 = [], [], [], []
for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):
all_x1.append(x1)
all_x2.append(x2)
all_y1.append(y1)
all_y2.append(y2)
all_x1 = torch.stack(all_x1)
all_x2 = torch.stack(all_x2)
all_y1 = torch.stack(all_y1).to(device)
all_y2 = torch.stack(all_y2).to(device)
x_aug = self.combine_fn(all_x1, all_x2, all_y1, all_y2)
y_aug = torch.stack((all_y1, all_y2), dim=-1)
if self.normalized_features:
x_aug = F.normalize(x_aug, dim=-1)
return x_aug, y_aug
def str2bool(s):
if s.lower() in ("yes", "true", "t", "y", "1"):
return True
elif s.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Boolean value expected.")
def get_noise(x, desired_SNR):
x_std = x.std()
# SNR = 10 * log10 ( (signal_power) / (noise_power) )
# where signal_power = data_std**2 and noise_power = noise_std**2,
# and SNR is passed as argparse param
noise_std = x_std / (10 ** (desired_SNR / 20))
return torch.randn_like(x) * noise_std
class LearnedEmbedding(pl.LightningModule):
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("LearnedEmbedding")
parser.add_argument("--encoder_arch", choices=["basic", "conformer", "vit", "identity"], default="basic")
parser.add_argument("--clf_arch", choices=["small", "large"], default="small")
parser.add_argument("--feature_dim", type=int, default=64)
# Note that with normalized features, we might need to re-normalize after making combinations
parser.add_argument("--data_noise_SNR", type=float, default=None, help="Desired SNR in dB. None for no noise.")
parser.add_argument(
"--feature_noise_SNR", type=float, default=None, help="Desired SNR in dB. None for no noise."
)
parser.add_argument("--normalized_features", type=str2bool, default=False)
parser.add_argument("--feature_combine_type", choices=["avg", "mlp"], default="avg")
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--lr_decay", type=float, default=1.0)
parser.add_argument("--linearity_loss_coeff", type=float, default=1.0)
parser.add_argument("--real_CE_loss_coeff", type=float, default=1.0)
parser.add_argument("--fake_CE_loss_coeff", type=float, default=1.0)
parser.add_argument("--loss_type", choices=["triplet", "triplet-centroids", "triplet-hard"], default="triplet")
parser.add_argument("--margin", type=float, default=1.0)
parser.add_argument("--centroids_momentum", type=float, default=0.75, help="For `triplet-centroids` loss")
parser.add_argument("--triplets_per_item", type=int, default=1, help="For `triplet` loss")
parser = parent_parser.add_argument_group("LearnedEmbedding - Fine-tuning")
parser.add_argument("--finetune_steps", type=int, default=10_000)
parser.add_argument("--finetune_lr", type=float, default=3e-5)
parser.add_argument("--finetune_lr_decay", type=float, default=1.0)
parser.add_argument("--finetune_batch_size", type=float, default=32)
parser.add_argument("--finetune_test_frac", type=float, default=0.2)
parser.add_argument("--finetune_n_aug_per_class", type=int, default=-1, help="-1 for all, positive for N")
return parent_parser
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters() # Access arg from command line "--arg1" at "self.hparams.arg1", etc
# NOTE - self.example_input_array - magic pytorch lightning variable for tboard log_graph
self.example_input_array = torch.ones(1, self.hparams.input_channels, self.hparams.input_time_length)
if self.hparams.encoder_arch == "basic":
self.embedding = EmbeddingNetwork(
input_channels=self.hparams.input_channels,
input_time_length=self.hparams.input_time_length,
feature_dim=self.hparams.feature_dim,
normalized_features=self.hparams.normalized_features,
use_preprocessed_data=self.hparams.use_preprocessed_data,
)
elif self.hparams.encoder_arch == "conformer": | self.embedding = Conformer( | 2 | 2023-11-01 21:12:05+00:00 | 8k |
openai/weak-to-strong | train_simple.py | [
{
"identifier": "get_tokenizer",
"path": "weak_to_strong/common.py",
"snippet": "def get_tokenizer(model_name: str):\n \"\"\"\n This function returns a tokenizer based on the model name.\n\n Parameters:\n model_name: The name of the model for which the tokenizer is needed.\n\n Returns:\n A tokenizer for the specified model.\n \"\"\"\n return AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)"
},
{
"identifier": "VALID_DATASETS",
"path": "weak_to_strong/datasets.py",
"snippet": "VALID_DATASETS: list[str] = list(_REGISTRY.keys())"
},
{
"identifier": "load_dataset",
"path": "weak_to_strong/datasets.py",
"snippet": "def load_dataset(ds_name: str, seed: int = 0, split_sizes: Optional[dict] = None):\n if split_sizes is None:\n split_sizes = dict(train=None, test=None)\n\n if ds_name not in _REGISTRY:\n raise ValueError(f\"Unknown dataset {ds_name}, please register\")\n cfg = _REGISTRY[ds_name]\n results = {}\n for split, n_docs in split_sizes.items():\n ds = cfg.loader(split)\n try:\n ds = ds.select(range(n_docs))\n except IndexError as e:\n print(f\"Warning {ds_name} has less than {n_docs} docs, using all: {e}\")\n ds = ds.map(functools.partial(cfg.formatter, rng=Random(seed)))\n ds = ds.map(\n lambda ex: {\"soft_label\": [1 - float(ex[\"hard_label\"]), float(ex[\"hard_label\"])]}\n )\n ds = ds.shuffle(seed=seed) # shuffling a bit pointless for test set but wtv\n results[split] = ds\n return results"
},
{
"identifier": "tokenize_dataset",
"path": "weak_to_strong/datasets.py",
"snippet": "def tokenize_dataset(\n raw_ds: HfDataset,\n tokenizer: Callable,\n max_ctx: int,\n):\n \"\"\"\n This function prepares the dataset for training. It takes the raw dataset, a formatting function,\n a tokenizer, a maximum context length\n\n Parameters:\n raw_ds: The raw dataset to be processed.\n tokenizer: The tokenizer to be used on the formatted dataset.\n max_ctx: The maximum context length for the tokenizer.\n\n Returns:\n ds: The processed and shuffled dataset ready for training.\n \"\"\"\n\n def process_function(res):\n toks = tokenizer(res[\"txt\"])\n return dict(\n input_ids=toks[\"input_ids\"],\n )\n\n ds = raw_ds.map(process_function, batched=False).filter(lambda x: len(x[\"input_ids\"]) < max_ctx)\n return ds"
},
{
"identifier": "logconf_loss_fn",
"path": "weak_to_strong/loss.py",
"snippet": "class logconf_loss_fn(LossFnBase):\n \"\"\"\n This class defines a custom loss function for log confidence.\n\n Attributes:\n aux_coef: A float indicating the auxiliary coefficient.\n warmup_frac: A float indicating the fraction of total training steps for warmup.\n \"\"\"\n\n def __init__(\n self,\n aux_coef: float = 0.5,\n warmup_frac: float = 0.1, # in terms of fraction of total training steps\n ):\n self.aux_coef = aux_coef\n self.warmup_frac = warmup_frac\n\n def __call__(\n self,\n logits: torch.Tensor,\n labels: torch.Tensor,\n step_frac: float,\n ) -> torch.Tensor:\n logits = logits.float()\n labels = labels.float()\n coef = 1.0 if step_frac > self.warmup_frac else step_frac\n coef = coef * self.aux_coef\n preds = torch.softmax(logits, dim=-1)\n mean_weak = torch.mean(labels, dim=0)\n assert mean_weak.shape == (2,)\n threshold = torch.quantile(preds[:, 0], mean_weak[1])\n strong_preds = torch.cat(\n [(preds[:, 0] >= threshold)[:, None], (preds[:, 0] < threshold)[:, None]],\n dim=1,\n )\n target = labels * (1 - coef) + strong_preds.detach() * coef\n loss = torch.nn.functional.cross_entropy(logits, target, reduction=\"none\")\n return loss.mean()"
},
{
"identifier": "product_loss_fn",
"path": "weak_to_strong/loss.py",
"snippet": "class product_loss_fn(LossFnBase):\n \"\"\"\n This class defines a custom loss function for product of predictions and labels.\n\n Attributes:\n alpha: A float indicating how much to weigh the weak model.\n beta: A float indicating how much to weigh the strong model.\n warmup_frac: A float indicating the fraction of total training steps for warmup.\n \"\"\"\n\n def __init__(\n self,\n alpha: float = 1.0, # how much to weigh the weak model\n beta: float = 1.0, # how much to weigh the strong model\n warmup_frac: float = 0.1, # in terms of fraction of total training steps\n ):\n self.alpha = alpha\n self.beta = beta\n self.warmup_frac = warmup_frac\n\n def __call__(\n self,\n logits: torch.Tensor,\n labels: torch.Tensor,\n step_frac: float,\n ) -> torch.Tensor:\n preds = torch.softmax(logits, dim=-1)\n target = torch.pow(preds, self.beta) * torch.pow(labels, self.alpha)\n target /= target.sum(dim=-1, keepdim=True)\n target = target.detach()\n loss = torch.nn.functional.cross_entropy(logits, target, reduction=\"none\")\n return loss.mean()"
},
{
"identifier": "xent_loss",
"path": "weak_to_strong/loss.py",
"snippet": "class xent_loss(LossFnBase):\n def __call__(\n self, logits: torch.Tensor, labels: torch.Tensor, step_frac: float\n ) -> torch.Tensor:\n \"\"\"\n This function calculates the cross entropy loss between logits and labels.\n\n Parameters:\n logits: The predicted values.\n labels: The actual values.\n step_frac: The fraction of total training steps completed.\n\n Returns:\n The mean of the cross entropy loss.\n \"\"\"\n loss = torch.nn.functional.cross_entropy(logits, labels)\n return loss.mean()"
},
{
"identifier": "ModelConfig",
"path": "weak_to_strong/train.py",
"snippet": "class ModelConfig:\n name: str\n default_lr: float\n eval_batch_size: int\n custom_kwargs: Optional[dict] = None\n gradient_checkpointing: bool = False\n model_parallel: bool = False\n default_optimizer: str = \"adam\""
},
{
"identifier": "train_and_save_model",
"path": "weak_to_strong/train.py",
"snippet": "def train_and_save_model(\n model_config: ModelConfig,\n train_ds: datasets.Dataset,\n test_ds: datasets.Dataset,\n inference_ds: Optional[datasets.Dataset] = None,\n *,\n batch_size: int,\n lr: float,\n epochs: int,\n eval_batch_size: Optional[int] = None,\n minibatch_size_per_device: Optional[int] = None,\n save_path: Optional[str] = None,\n loss_fn: Callable = xent_loss,\n label: str = \"default\",\n force_retrain: bool = False,\n train_with_dropout: bool = False,\n linear_probe: bool = False,\n lr_schedule: str = \"constant\",\n optimizer_name: str = \"adam\",\n eval_every: Optional[int] = None,\n):\n if eval_batch_size is None:\n eval_batch_size = batch_size\n\n if minibatch_size_per_device is None:\n minibatch_size_per_device = 1\n\n gradient_checkpointing = model_config.gradient_checkpointing\n custom_kwargs = model_config.custom_kwargs or {}\n\n def maybe_load_model(model):\n if os.path.exists(os.path.join(save_path, \"results.pkl\")) and not force_retrain:\n print(\"loading from\", save_path)\n checkpoint_path = os.path.join(save_path, \"pytorch_model.bin\")\n if not os.path.exists(checkpoint_path):\n # Assume this means we have a sharded checkpoint, and load it appropriately\n load_sharded_checkpoint(model, checkpoint_path)\n else:\n state_dict = torch.load(os.path.join(save_path, \"pytorch_model.bin\"))\n state_dict = {\n k.replace(\"transformer.module\", \"transformer\"): v\n for (k, v) in state_dict.items()\n }\n custom_kwargs[\"state_dict\"] = state_dict\n return True\n return False\n\n already_trained = False\n # Load the model\n if model_config.model_parallel:\n assert torch.cuda.device_count() > 1, f\"you might want more gpus for {model_config.name}\"\n model = TransformerWithHead.from_pretrained(\n model_config.name,\n num_labels=2,\n device_map=\"auto\",\n linear_probe=linear_probe,\n **custom_kwargs,\n )\n already_trained = maybe_load_model(model)\n # slight misnomer, more like minibatch_size_per_dp_replica\n minibatch_size = minibatch_size_per_device\n else:\n model = TransformerWithHead.from_pretrained(\n model_config.name, num_labels=2, linear_probe=linear_probe, **custom_kwargs\n ).to(\"cuda\")\n already_trained = maybe_load_model(model)\n # data parallel: currently not supported with model parallel\n if torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model, output_device=0)\n minibatch_size = min(minibatch_size_per_device * torch.cuda.device_count(), batch_size)\n print(\n \"Using\",\n torch.cuda.device_count(),\n \"GPUs, setting minibatch_size to\",\n minibatch_size,\n )\n else:\n minibatch_size = minibatch_size_per_device\n\n if already_trained:\n test_results = eval_model_acc(model, test_ds, eval_batch_size)\n else:\n start = time.time()\n test_results = train_model(\n model,\n train_ds,\n batch_size,\n lr=lr,\n epochs=epochs,\n eval_ds=test_ds,\n gradient_checkpointing=gradient_checkpointing,\n loss_fn=loss_fn,\n eval_batch_size=eval_batch_size,\n eval_every=eval_every,\n minibatch_size=minibatch_size,\n train_with_dropout=train_with_dropout,\n lr_schedule=lr_schedule,\n optimizer_name=optimizer_name,\n )\n print(\"Model training took\", time.time() - start, \"seconds\")\n if save_path:\n # Note: If the model is wrapped by DataParallel, we need to unwrap it before saving\n (model if hasattr(model, \"save_pretrained\") else model.module).save_pretrained(\n save_path\n )\n print(\"saved\", save_path)\n\n inference_results = None\n if inference_ds:\n inference_results = eval_model_acc(model, inference_ds, eval_batch_size)\n 
logger.logkv(\"inference_accuracy\", np.mean([r[\"acc\"] for r in inference_results]))\n\n if save_path:\n with open(os.path.join(save_path, \"results.pkl\"), \"wb\") as f:\n pickle.dump(\n {\n \"avg_acc_test\": float(np.mean([r[\"acc\"] for r in test_results])),\n \"avg_acc_inference\": float(\n np.mean([r[\"acc\"] for r in inference_results] if inference_results else [])\n ),\n \"test_results\": test_results,\n \"inference_results\": inference_results if inference_results else [],\n },\n f,\n )\n # try to clean up memory\n clear_mem()\n logger.shutdown()\n\n return test_results, inference_results"
}
] | import json
import os
import random
import subprocess
import fire
import numpy as np
import torch
import weak_to_strong.logger as logger
from typing import Dict, List, Optional
from datasets import load_dataset, load_from_disk
from weak_to_strong.common import get_tokenizer
from weak_to_strong.datasets import (VALID_DATASETS, load_dataset,
tokenize_dataset)
from weak_to_strong.loss import logconf_loss_fn, product_loss_fn, xent_loss
from weak_to_strong.train import ModelConfig, train_and_save_model | 5,115 | optim: Optional[str] = None,
epochs: int = 2,
force_retrain: bool = False,
seed: int = 0,
minibatch_size_per_device: Optional[float] = None,
train_with_dropout: bool = False,
results_folder: str = "/tmp/results",
linear_probe: bool = False,
lr_schedule: str = "cosine_anneal",
# Note: you can pass either weak_model_size or weak_labels_path. If you pass
# weak_model_size, we will guess the path to the weak labels based on the weak
# model. If you pass weak_labels_path, we will use that path instead.
# If you pass neither, we will train on ground truth.
weak_model_size: Optional[str] = None,
weak_labels_path: Optional[str] = None,
sweep_subfolder: str = "default",
# Set to a very large value so that by default we don't do any intermediate evals but
# still do final evals (which requires eval_every to be set to a non-zero, non-None value)
eval_every: int = 1000000,
sync_command: Optional[str] = None,
):
# this is per device!
if minibatch_size_per_device is None:
minibatch_size_per_device = 1
assert ds_name in VALID_DATASETS, f"Unknown dataset {ds_name} not in {VALID_DATASETS}"
assert (
weak_model_size is None or weak_labels_path is None
), "Can't pass both weak_model_size and weak_labels_path"
model_config = MODELS_DICT[model_size]
use_default_lr = False
if lr is None:
assert (
batch_size == 32
), "Learning rates were tuned on batch size 32, you probably want to sweep LR if you are tuning batch size"
lr = model_config.default_lr
use_default_lr = True
if optim is None:
optim = model_config.default_optimizer
# The commented out terms are the ones that should not change final results
config = {
"batch_size": batch_size,
"max_ctx": max_ctx,
"ds_name": ds_name,
"loss": loss,
"n_docs": n_docs,
"n_test_docs": n_test_docs,
"model_size": model_size,
"lr": lr,
"optim": optim,
"epochs": epochs,
# "force_retrain": force_retrain,
"seed": seed,
# "minibatch_size_per_device": minibatch_size_per_device,
"train_with_dropout": train_with_dropout,
# "results_folder": results_folder,
"linear_probe": linear_probe,
"lr_schedule": lr_schedule,
"eval_every": eval_every,
# "sweep_subfolder": sweep_subfolder,
}
if weak_model_size is not None:
weak_model_config = config.copy()
weak_model_config["model_size"] = weak_model_size
weak_model_config["loss"] = "xent"
if use_default_lr:
weak_model_config["lr"] = MODELS_DICT[weak_model_size].default_lr
weak_model_config_name = get_config_foldername(weak_model_config)
weak_labels_path = (
results_folder + "/" + sweep_subfolder + "/" + weak_model_config_name + "/weak_labels"
)
eval_batch_size = model_config.eval_batch_size
random.seed(seed)
# Load dataset
dataset = load_dataset(ds_name, seed=seed, split_sizes=dict(train=n_docs, test=n_test_docs))
# Split the training dataset in half
train_dataset, test_ds = dataset["train"], dataset["test"]
if weak_labels_path is None:
split_data = train_dataset.train_test_split(test_size=0.5, seed=seed)
train1_ds, train2_ds = split_data["train"], split_data["test"]
print("len(train1):", len(train1_ds), "len(train2):", len(train2_ds))
config_name = get_config_foldername(config)
else:
if not weak_labels_path.endswith("weak_labels"):
weak_labels_path = weak_labels_path + "/weak_labels"
if sync_command is not None:
sync_command_list = sync_command.split(" ")
sync_command_list.extend(
["download", weak_labels_path.replace("/weak_labels", ""), results_folder]
)
print(f"Running sync command: {' '.join(sync_command_list)}")
result = subprocess.run(sync_command_list, check=True)
if result.returncode != 0:
raise RuntimeError(f"Sync command failed with return code {result.returncode}")
train1_ds = load_from_disk(weak_labels_path)
train2_ds = None
weak_model_config = json.load(open(weak_labels_path.replace("weak_labels", "config.json")))
config["weak_model_size"] = weak_model_config["model_size"]
config_name = get_config_foldername(config)
config["weak_model"] = weak_model_config
save_path = os.path.join(results_folder, sweep_subfolder, config_name)
logger.configure(
name="{sweep_subfolder}_{config_name}_{datetime_now}",
save_path=save_path,
sweep_subfolder=sweep_subfolder,
config_name=config_name,
)
# Tokenize datasets
tokenizer = get_tokenizer(model_config.name)
|
# NOTE learning rates are not particularly tuned, work somewhat reasonably at train batch size 32
MODEL_CONFIGS = [
ModelConfig(
name="gpt2",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-medium",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-large",
default_lr=1e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-xl",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
# Should use model_parallel on V100s (note: ironically if you have a single V100 it should run,
# but if you have multiple it won't run without model_parallel because of the overhead of data
# parallel training).
model_parallel=(
torch.cuda.get_device_properties(0).total_memory < 35e9
and torch.cuda.device_count() > 1
),
),
ModelConfig(
name="Qwen/Qwen-1_8B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=(
torch.cuda.get_device_properties(0).total_memory < 35e9
and torch.cuda.device_count() > 1
),
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "5fde88dff770a7d036847211f5d9d9705f0caa69",
},
),
ModelConfig(
name="Qwen/Qwen-7B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "d4efd21e866b9cb3466cb65b963933f5e98016d1",
},
),
ModelConfig(
name="Qwen/Qwen-14B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "8be2854218fea9054331e217fd26a06f3fd02004",
},
),
ModelConfig(
name="Qwen/Qwen-72B",
default_lr=1e-5,
eval_batch_size=1,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "fec78c0e3b3b10dd9f0ce775c34a686a3255a7d1",
},
# This model is really big, save space by using adafactor.
# Note that even then it will take up ~60GB per GPU on an 8-GPU machine.
default_optimizer="adafactor",
),
]
MODELS_DICT: Dict[str, ModelConfig] = {
model_config.name: model_config for model_config in MODEL_CONFIGS
}
loss_dict = {
"logconf": logconf_loss_fn(),
"product": product_loss_fn(),
"xent": xent_loss(),
}
VALID_LOSSES: List[str] = list(loss_dict.keys())
def get_config_foldername(config: dict) -> str:
def shorten_key(key: str) -> str:
return "".join(word[0] for word in key.split("_"))
def shorten_value(value) -> str:
if isinstance(value, bool):
return "1" if value else "0"
elif isinstance(value, str):
value = value.split("/")[-1]
if "_" in value:
return "_".join(word[:4] for word in value.split("_"))
else:
return value
else:
return str(value)
return "-".join(f"{shorten_key(k)}={shorten_value(v)}" for k, v in sorted(config.items()))
def main(
batch_size: int = 32,
max_ctx: int = 1024,
ds_name: str = "sciq",
loss: str = "xent",
n_docs: int = 20000,
n_test_docs: int = 10000,
model_size: str = "gpt2",
lr: Optional[float] = None,
optim: Optional[str] = None,
epochs: int = 2,
force_retrain: bool = False,
seed: int = 0,
minibatch_size_per_device: Optional[float] = None,
train_with_dropout: bool = False,
results_folder: str = "/tmp/results",
linear_probe: bool = False,
lr_schedule: str = "cosine_anneal",
# Note: you can pass either weak_model_size or weak_labels_path. If you pass
# weak_model_size, we will guess the path to the weak labels based on the weak
# model. If you pass weak_labels_path, we will use that path instead.
# If you pass neither, we will train on ground truth.
weak_model_size: Optional[str] = None,
weak_labels_path: Optional[str] = None,
sweep_subfolder: str = "default",
# Set to a very large value so that by default we don't do any intermediate evals but
# still do final evals (which requires eval_every to be set to a non-zero, non-None value)
eval_every: int = 1000000,
sync_command: Optional[str] = None,
):
# this is per device!
if minibatch_size_per_device is None:
minibatch_size_per_device = 1
assert ds_name in VALID_DATASETS, f"Unknown dataset {ds_name} not in {VALID_DATASETS}"
assert (
weak_model_size is None or weak_labels_path is None
), "Can't pass both weak_model_size and weak_labels_path"
model_config = MODELS_DICT[model_size]
use_default_lr = False
if lr is None:
assert (
batch_size == 32
), "Learning rates were tuned on batch size 32, you probably want to sweep LR if you are tuning batch size"
lr = model_config.default_lr
use_default_lr = True
if optim is None:
optim = model_config.default_optimizer
# The commented out terms are the ones that should not change final results
config = {
"batch_size": batch_size,
"max_ctx": max_ctx,
"ds_name": ds_name,
"loss": loss,
"n_docs": n_docs,
"n_test_docs": n_test_docs,
"model_size": model_size,
"lr": lr,
"optim": optim,
"epochs": epochs,
# "force_retrain": force_retrain,
"seed": seed,
# "minibatch_size_per_device": minibatch_size_per_device,
"train_with_dropout": train_with_dropout,
# "results_folder": results_folder,
"linear_probe": linear_probe,
"lr_schedule": lr_schedule,
"eval_every": eval_every,
# "sweep_subfolder": sweep_subfolder,
}
if weak_model_size is not None:
weak_model_config = config.copy()
weak_model_config["model_size"] = weak_model_size
weak_model_config["loss"] = "xent"
if use_default_lr:
weak_model_config["lr"] = MODELS_DICT[weak_model_size].default_lr
weak_model_config_name = get_config_foldername(weak_model_config)
weak_labels_path = (
results_folder + "/" + sweep_subfolder + "/" + weak_model_config_name + "/weak_labels"
)
eval_batch_size = model_config.eval_batch_size
random.seed(seed)
# Load dataset
dataset = load_dataset(ds_name, seed=seed, split_sizes=dict(train=n_docs, test=n_test_docs))
# Split the training dataset in half
train_dataset, test_ds = dataset["train"], dataset["test"]
if weak_labels_path is None:
split_data = train_dataset.train_test_split(test_size=0.5, seed=seed)
train1_ds, train2_ds = split_data["train"], split_data["test"]
print("len(train1):", len(train1_ds), "len(train2):", len(train2_ds))
config_name = get_config_foldername(config)
else:
if not weak_labels_path.endswith("weak_labels"):
weak_labels_path = weak_labels_path + "/weak_labels"
if sync_command is not None:
sync_command_list = sync_command.split(" ")
sync_command_list.extend(
["download", weak_labels_path.replace("/weak_labels", ""), results_folder]
)
print(f"Running sync command: {' '.join(sync_command_list)}")
result = subprocess.run(sync_command_list, check=True)
if result.returncode != 0:
raise RuntimeError(f"Sync command failed with return code {result.returncode}")
train1_ds = load_from_disk(weak_labels_path)
train2_ds = None
weak_model_config = json.load(open(weak_labels_path.replace("weak_labels", "config.json")))
config["weak_model_size"] = weak_model_config["model_size"]
config_name = get_config_foldername(config)
config["weak_model"] = weak_model_config
save_path = os.path.join(results_folder, sweep_subfolder, config_name)
logger.configure(
name="{sweep_subfolder}_{config_name}_{datetime_now}",
save_path=save_path,
sweep_subfolder=sweep_subfolder,
config_name=config_name,
)
# Tokenize datasets
tokenizer = get_tokenizer(model_config.name) | train1_ds = tokenize_dataset(train1_ds, tokenizer, max_ctx) | 3 | 2023-12-13 23:53:13+00:00 | 8k |
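The record above ends with its gold completion (`train1_ds = tokenize_dataset(train1_ds, tokenizer, max_ctx)`), its gold_snippet_index, and its creation timestamp. As a purely illustrative aside (not part of the dataset or of the quoted repository), a minimal Python sketch for scoring a model's predicted completion against such a record's next_line could look like the following; `predict`-side names here are hypothetical stand-ins, and only the standard library is used:

import difflib

def score_completion(predicted: str, reference: str) -> dict:
    # Compare a predicted code line against the record's gold next_line,
    # reporting exact match after whitespace stripping plus a character-level
    # similarity ratio as a softer signal.
    pred, ref = predicted.strip(), reference.strip()
    return {
        "exact_match": pred == ref,
        "similarity": difflib.SequenceMatcher(None, pred, ref).ratio(),
    }

# Example with the gold next_line from the record above and a hypothetical prediction.
reference = "train1_ds = tokenize_dataset(train1_ds, tokenizer, max_ctx)"
predicted = "train1_ds = tokenize_dataset(train1_ds, tokenizer, max_ctx=max_ctx)"
print(score_completion(predicted, reference))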
linyiLYi/voice-assistant | whisper/transcribe.py | [
{
"identifier": "FRAMES_PER_SECOND",
"path": "whisper/audio.py",
"snippet": "FRAMES_PER_SECOND = SAMPLE_RATE // HOP_LENGTH # 10ms per audio frame"
},
{
"identifier": "HOP_LENGTH",
"path": "whisper/audio.py",
"snippet": "HOP_LENGTH = 160"
},
{
"identifier": "N_FRAMES",
"path": "whisper/audio.py",
"snippet": "N_FRAMES = N_SAMPLES // HOP_LENGTH # 3000 frames in a mel spectrogram input"
},
{
"identifier": "N_SAMPLES",
"path": "whisper/audio.py",
"snippet": "N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000 samples in a 30-second chunk"
},
{
"identifier": "SAMPLE_RATE",
"path": "whisper/audio.py",
"snippet": "SAMPLE_RATE = 16000"
},
{
"identifier": "log_mel_spectrogram",
"path": "whisper/audio.py",
"snippet": "def log_mel_spectrogram(\n audio: Union[str, np.ndarray],\n n_mels: int = 80,\n padding: int = 0,\n):\n \"\"\"\n Compute the log-Mel spectrogram of\n\n Parameters\n ----------\n audio: Union[str, np.ndarray, mx.array], shape = (*)\n The path to audio or either a NumPy or mlx array containing the audio waveform in 16 kHz\n\n n_mels: int\n The number of Mel-frequency filters, only 80 is supported\n\n padding: int\n Number of zero samples to pad to the right\n\n Returns\n -------\n mx.array, shape = (80, n_frames)\n An array that contains the Mel spectrogram\n \"\"\"\n device = mx.default_device()\n mx.set_default_device(mx.cpu)\n if not isinstance(audio, mx.array):\n if isinstance(audio, str):\n audio = load_audio(audio)\n audio = mx.array(audio)\n\n if padding > 0:\n audio = mx.pad(audio, (0, padding))\n window = hanning(N_FFT)\n freqs = stft(audio, window, nperseg=N_FFT, noverlap=HOP_LENGTH)\n magnitudes = freqs[:-1, :].abs().square()\n\n filters = mel_filters(n_mels)\n mel_spec = magnitudes @ filters.T\n\n log_spec = mx.maximum(mel_spec, 1e-10).log10()\n log_spec = mx.maximum(log_spec, log_spec.max() - 8.0)\n log_spec = (log_spec + 4.0) / 4.0\n mx.set_default_device(device)\n return log_spec"
},
{
"identifier": "pad_or_trim",
"path": "whisper/audio.py",
"snippet": "def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):\n \"\"\"\n Pad or trim the audio array to N_SAMPLES, as expected by the encoder.\n \"\"\"\n if array.shape[axis] > length:\n sl = [slice(None)] * array.ndim\n sl[axis] = slice(0, length)\n array = array[tuple(sl)]\n\n if array.shape[axis] < length:\n pad_widths = [(0, 0)] * array.ndim\n pad_widths[axis] = (0, length - array.shape[axis])\n pad_fn = mx.pad if isinstance(array, mx.array) else np.pad\n array = pad_fn(array, pad_widths)\n\n return array"
},
{
"identifier": "DecodingOptions",
"path": "whisper/decoding.py",
"snippet": "class DecodingOptions:\n # whether to perform X->X \"transcribe\" or X->English \"translate\"\n task: str = \"transcribe\"\n\n # language that the audio is in; uses detected language if None\n language: Optional[str] = None\n\n # sampling-related options\n temperature: float = 0.0\n sample_len: Optional[int] = None # maximum number of tokens to sample\n best_of: Optional[int] = None # number of independent sample trajectories, if t > 0\n beam_size: Optional[int] = None # number of beams in beam search, if t == 0\n patience: Optional[float] = None # patience in beam search (arxiv:2204.05424)\n\n # \"alpha\" in Google NMT, or None for length norm, when ranking generations\n # to select which to return among the beams or best-of-N samples\n length_penalty: Optional[float] = None\n\n # text or tokens to feed as the prompt or the prefix; for more info:\n # https://github.com/openai/whisper/discussions/117#discussioncomment-3727051\n prompt: Optional[Union[str, List[int]]] = None # for the previous context\n prefix: Optional[Union[str, List[int]]] = None # to prefix the current context\n\n # list of tokens ids (or comma-separated token ids) to suppress\n # \"-1\" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`\n suppress_tokens: Optional[Union[str, Iterable[int]]] = \"-1\"\n suppress_blank: bool = True # this will suppress blank outputs\n\n # timestamp sampling options\n without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only\n max_initial_timestamp: Optional[float] = 1.0\n\n # implementation details\n fp16: bool = True # use fp16 for most of the calculation"
},
{
"identifier": "DecodingResult",
"path": "whisper/decoding.py",
"snippet": "class DecodingResult:\n audio_features: mx.array\n language: str\n language_probs: Optional[Dict[str, float]] = None\n tokens: List[int] = field(default_factory=list)\n text: str = \"\"\n avg_logprob: float = np.nan\n no_speech_prob: float = np.nan\n temperature: float = np.nan\n compression_ratio: float = np.nan"
},
{
"identifier": "load_model",
"path": "whisper/load_models.py",
"snippet": "def load_model(\n path_or_hf_repo: str,\n dtype: mx.Dtype = mx.float32,\n) -> whisper.Whisper:\n model_path = Path(path_or_hf_repo)\n if not model_path.exists():\n model_path = Path(\n snapshot_download(\n repo_id=path_or_hf_repo\n )\n )\n\n with open(str(model_path / \"config.json\"), \"r\") as f:\n config = json.loads(f.read())\n config.pop(\"model_type\", None)\n quantization = config.pop(\"quantization\", None)\n\n model_args = whisper.ModelDimensions(**config)\n\n weights = mx.load(str(model_path / \"weights.npz\"))\n weights = tree_unflatten(list(weights.items()))\n\n model = whisper.Whisper(model_args, dtype)\n\n if quantization is not None:\n nn.QuantizedLinear.quantize_module(model, **quantization)\n\n model.update(weights)\n mx.eval(model.parameters())\n return model"
},
{
"identifier": "add_word_timestamps",
"path": "whisper/timing.py",
"snippet": "def add_word_timestamps(\n *,\n segments: List[dict],\n model: \"Whisper\",\n tokenizer: Tokenizer,\n mel: mx.array,\n num_frames: int,\n prepend_punctuations: str = \"\\\"'“¿([{-\",\n append_punctuations: str = \"\\\"'.。,,!!??::”)]}、\",\n last_speech_timestamp: float,\n **kwargs,\n):\n if len(segments) == 0:\n return\n\n text_tokens_per_segment = [\n [token for token in segment[\"tokens\"] if token < tokenizer.eot]\n for segment in segments\n ]\n\n text_tokens = list(itertools.chain.from_iterable(text_tokens_per_segment))\n alignment = find_alignment(model, tokenizer, text_tokens, mel, num_frames, **kwargs)\n word_durations = np.array([t.end - t.start for t in alignment])\n word_durations = word_durations[word_durations.nonzero()]\n median_duration = np.median(word_durations) if len(word_durations) > 0 else 0.0\n median_duration = min(0.7, float(median_duration))\n max_duration = median_duration * 2\n\n # hack: truncate long words at sentence boundaries.\n # a better segmentation algorithm based on VAD should be able to replace this.\n if len(word_durations) > 0:\n sentence_end_marks = \".。!!??\"\n # ensure words at sentence boundaries are not longer than twice the median word duration.\n for i in range(1, len(alignment)):\n if alignment[i].end - alignment[i].start > max_duration:\n if alignment[i].word in sentence_end_marks:\n alignment[i].end = alignment[i].start + max_duration\n elif alignment[i - 1].word in sentence_end_marks:\n alignment[i].start = alignment[i].end - max_duration\n\n merge_punctuations(alignment, prepend_punctuations, append_punctuations)\n\n time_offset = segments[0][\"seek\"] * HOP_LENGTH / SAMPLE_RATE\n word_index = 0\n\n for segment, text_tokens in zip(segments, text_tokens_per_segment):\n saved_tokens = 0\n words = []\n\n while word_index < len(alignment) and saved_tokens < len(text_tokens):\n timing = alignment[word_index]\n\n if timing.word:\n words.append(\n dict(\n word=timing.word,\n start=round(time_offset + timing.start, 2),\n end=round(time_offset + timing.end, 2),\n probability=timing.probability,\n )\n )\n\n saved_tokens += len(timing.tokens)\n word_index += 1\n\n # hack: truncate long words at segment boundaries.\n # a better segmentation algorithm based on VAD should be able to replace this.\n if len(words) > 0:\n # ensure the first and second word after a pause is not longer than\n # twice the median word duration.\n if words[0][\"end\"] - last_speech_timestamp > median_duration * 4 and (\n words[0][\"end\"] - words[0][\"start\"] > max_duration\n or (\n len(words) > 1\n and words[1][\"end\"] - words[0][\"start\"] > max_duration * 2\n )\n ):\n if (\n len(words) > 1\n and words[1][\"end\"] - words[1][\"start\"] > max_duration\n ):\n boundary = max(words[1][\"end\"] / 2, words[1][\"end\"] - max_duration)\n words[0][\"end\"] = words[1][\"start\"] = boundary\n words[0][\"start\"] = max(0, words[0][\"end\"] - max_duration)\n\n # prefer the segment-level start timestamp if the first word is too long.\n if (\n segment[\"start\"] < words[0][\"end\"]\n and segment[\"start\"] - 0.5 > words[0][\"start\"]\n ):\n words[0][\"start\"] = max(\n 0, min(words[0][\"end\"] - median_duration, segment[\"start\"])\n )\n else:\n segment[\"start\"] = words[0][\"start\"]\n\n # prefer the segment-level end timestamp if the last word is too long.\n if (\n segment[\"end\"] > words[-1][\"start\"]\n and segment[\"end\"] + 0.5 < words[-1][\"end\"]\n ):\n words[-1][\"end\"] = max(\n words[-1][\"start\"] + median_duration, segment[\"end\"]\n )\n else:\n segment[\"end\"] = 
words[-1][\"end\"]\n\n last_speech_timestamp = segment[\"end\"]\n\n segment[\"words\"] = words"
},
{
"identifier": "LANGUAGES",
"path": "whisper/tokenizer.py",
"snippet": "LANGUAGES = {\n \"en\": \"english\",\n \"zh\": \"chinese\",\n \"de\": \"german\",\n \"es\": \"spanish\",\n \"ru\": \"russian\",\n \"ko\": \"korean\",\n \"fr\": \"french\",\n \"ja\": \"japanese\",\n \"pt\": \"portuguese\",\n \"tr\": \"turkish\",\n \"pl\": \"polish\",\n \"ca\": \"catalan\",\n \"nl\": \"dutch\",\n \"ar\": \"arabic\",\n \"sv\": \"swedish\",\n \"it\": \"italian\",\n \"id\": \"indonesian\",\n \"hi\": \"hindi\",\n \"fi\": \"finnish\",\n \"vi\": \"vietnamese\",\n \"he\": \"hebrew\",\n \"uk\": \"ukrainian\",\n \"el\": \"greek\",\n \"ms\": \"malay\",\n \"cs\": \"czech\",\n \"ro\": \"romanian\",\n \"da\": \"danish\",\n \"hu\": \"hungarian\",\n \"ta\": \"tamil\",\n \"no\": \"norwegian\",\n \"th\": \"thai\",\n \"ur\": \"urdu\",\n \"hr\": \"croatian\",\n \"bg\": \"bulgarian\",\n \"lt\": \"lithuanian\",\n \"la\": \"latin\",\n \"mi\": \"maori\",\n \"ml\": \"malayalam\",\n \"cy\": \"welsh\",\n \"sk\": \"slovak\",\n \"te\": \"telugu\",\n \"fa\": \"persian\",\n \"lv\": \"latvian\",\n \"bn\": \"bengali\",\n \"sr\": \"serbian\",\n \"az\": \"azerbaijani\",\n \"sl\": \"slovenian\",\n \"kn\": \"kannada\",\n \"et\": \"estonian\",\n \"mk\": \"macedonian\",\n \"br\": \"breton\",\n \"eu\": \"basque\",\n \"is\": \"icelandic\",\n \"hy\": \"armenian\",\n \"ne\": \"nepali\",\n \"mn\": \"mongolian\",\n \"bs\": \"bosnian\",\n \"kk\": \"kazakh\",\n \"sq\": \"albanian\",\n \"sw\": \"swahili\",\n \"gl\": \"galician\",\n \"mr\": \"marathi\",\n \"pa\": \"punjabi\",\n \"si\": \"sinhala\",\n \"km\": \"khmer\",\n \"sn\": \"shona\",\n \"yo\": \"yoruba\",\n \"so\": \"somali\",\n \"af\": \"afrikaans\",\n \"oc\": \"occitan\",\n \"ka\": \"georgian\",\n \"be\": \"belarusian\",\n \"tg\": \"tajik\",\n \"sd\": \"sindhi\",\n \"gu\": \"gujarati\",\n \"am\": \"amharic\",\n \"yi\": \"yiddish\",\n \"lo\": \"lao\",\n \"uz\": \"uzbek\",\n \"fo\": \"faroese\",\n \"ht\": \"haitian creole\",\n \"ps\": \"pashto\",\n \"tk\": \"turkmen\",\n \"nn\": \"nynorsk\",\n \"mt\": \"maltese\",\n \"sa\": \"sanskrit\",\n \"lb\": \"luxembourgish\",\n \"my\": \"myanmar\",\n \"bo\": \"tibetan\",\n \"tl\": \"tagalog\",\n \"mg\": \"malagasy\",\n \"as\": \"assamese\",\n \"tt\": \"tatar\",\n \"haw\": \"hawaiian\",\n \"ln\": \"lingala\",\n \"ha\": \"hausa\",\n \"ba\": \"bashkir\",\n \"jw\": \"javanese\",\n \"su\": \"sundanese\",\n \"yue\": \"cantonese\",\n}"
},
{
"identifier": "get_tokenizer",
"path": "whisper/tokenizer.py",
"snippet": "@lru_cache(maxsize=None)\ndef get_tokenizer(\n multilingual: bool,\n *,\n num_languages: int = 99,\n language: Optional[str] = None,\n task: Optional[str] = None, # Literal[\"transcribe\", \"translate\", None]\n) -> Tokenizer:\n if language is not None:\n language = language.lower()\n if language not in LANGUAGES:\n if language in TO_LANGUAGE_CODE:\n language = TO_LANGUAGE_CODE[language]\n else:\n raise ValueError(f\"Unsupported language: {language}\")\n\n if multilingual:\n encoding_name = \"multilingual\"\n language = language or \"en\"\n task = task or \"transcribe\"\n else:\n encoding_name = \"gpt2\"\n language = None\n task = None\n\n encoding = get_encoding(name=encoding_name, num_languages=num_languages)\n\n return Tokenizer(\n encoding=encoding, num_languages=num_languages, language=language, task=task\n )"
}
] | import sys
import warnings
import mlx.core as mx
import numpy as np
import tqdm
from typing import List, Optional, Tuple, Union
from .audio import (
FRAMES_PER_SECOND,
HOP_LENGTH,
N_FRAMES,
N_SAMPLES,
SAMPLE_RATE,
log_mel_spectrogram,
pad_or_trim,
)
from .decoding import DecodingOptions, DecodingResult
from .load_models import load_model
from .timing import add_word_timestamps
from .tokenizer import LANGUAGES, get_tokenizer | 5,388 | model_path = None
@classmethod
def get_model(cls, model_path: str, dtype: mx.Dtype):
if cls.model is None or model_path != cls.model_path:
cls.model = load_model(model_path, dtype=dtype)
cls.model_path = model_path
return cls.model
def transcribe(
audio: Union[str, np.ndarray, mx.array],
*,
path_or_hf_repo: str = "mlx-community/whisper-tiny",
verbose: Optional[bool] = None,
temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
compression_ratio_threshold: Optional[float] = 2.4,
logprob_threshold: Optional[float] = -1.0,
no_speech_threshold: Optional[float] = 0.6,
condition_on_previous_text: bool = True,
initial_prompt: Optional[str] = None,
word_timestamps: bool = False,
prepend_punctuations: str = "\"'“¿([{-",
append_punctuations: str = "\"'.。,,!!??::”)]}、",
clip_timestamps: Union[str, List[float]] = "0",
hallucination_silence_threshold: Optional[float] = None,
**decode_options,
):
"""
Transcribe an audio file using Whisper
Parameters
----------
audio: Union[str, np.ndarray, mx.array]
The path to the audio file to open, or the audio waveform
path_or_hf_repo: str
The local path to the Whisper model or HF Hub repo with the MLX converted weights.
verbose: bool
Whether to display the text being decoded to the console. If True, displays all the details,
If False, displays minimal details. If None, does not display anything
temperature: Union[float, Tuple[float, ...]]
Temperature for sampling. It can be a tuple of temperatures, which will be successively used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
word_timestamps: bool
Extract word-level timestamps using the cross-attention pattern and dynamic time warping,
and include the timestamps for each word in each segment.
prepend_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the next word
append_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the previous word
initial_prompt: Optional[str]
Optional text to provide as a prompt for the first window. This can be used to provide, or
"prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns
to make it more likely to predict those words correctly.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
clip_timestamps: Union[str, List[float]]
Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process.
The last end timestamp defaults to the end of the file.
hallucination_silence_threshold: Optional[float]
When word_timestamps is True, skip silent periods longer than this threshold (in seconds)
when a possible hallucination is detected
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None.
"""
dtype = mx.float16 if decode_options.get("fp16", True) else mx.float32
model = ModelHolder.get_model(path_or_hf_repo, dtype)
# Pad 30-seconds of silence to the input audio, for slicing
mel = log_mel_spectrogram(audio, n_mels=model.dims.n_mels, padding=N_SAMPLES)
content_frames = mel.shape[-2] - N_FRAMES
content_duration = float(content_frames * HOP_LENGTH / SAMPLE_RATE)
if verbose:
system_encoding = sys.getdefaultencoding()
if system_encoding != "utf-8":
make_safe = lambda x: x.encode(system_encoding, errors="replace").decode(
system_encoding
)
else:
make_safe = lambda x: x
if decode_options.get("language", None) is None:
if not model.is_multilingual:
decode_options["language"] = "en"
else:
if verbose:
print(
"Detecting language using up to the first 30 seconds. "
"Use the `language` decoding option to specify the language"
)
| # Copyright © 2023 Apple Inc.
def _format_timestamp(seconds: float):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}.{milliseconds:03d}"
def _get_end(segments: List[dict]) -> Optional[float]:
return next(
(w["end"] for s in reversed(segments) for w in reversed(s["words"])),
segments[-1]["end"] if segments else None,
)
class ModelHolder:
model = None
model_path = None
@classmethod
def get_model(cls, model_path: str, dtype: mx.Dtype):
if cls.model is None or model_path != cls.model_path:
cls.model = load_model(model_path, dtype=dtype)
cls.model_path = model_path
return cls.model
def transcribe(
audio: Union[str, np.ndarray, mx.array],
*,
path_or_hf_repo: str = "mlx-community/whisper-tiny",
verbose: Optional[bool] = None,
temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
compression_ratio_threshold: Optional[float] = 2.4,
logprob_threshold: Optional[float] = -1.0,
no_speech_threshold: Optional[float] = 0.6,
condition_on_previous_text: bool = True,
initial_prompt: Optional[str] = None,
word_timestamps: bool = False,
prepend_punctuations: str = "\"'“¿([{-",
append_punctuations: str = "\"'.。,,!!??::”)]}、",
clip_timestamps: Union[str, List[float]] = "0",
hallucination_silence_threshold: Optional[float] = None,
**decode_options,
):
"""
Transcribe an audio file using Whisper
Parameters
----------
audio: Union[str, np.ndarray, mx.array]
The path to the audio file to open, or the audio waveform
path_or_hf_repo: str
The local path to the Whisper model or HF Hub repo with the MLX converted weights.
verbose: bool
Whether to display the text being decoded to the console. If True, displays all the details,
If False, displays minimal details. If None, does not display anything
temperature: Union[float, Tuple[float, ...]]
Temperature for sampling. It can be a tuple of temperatures, which will be successively used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
word_timestamps: bool
Extract word-level timestamps using the cross-attention pattern and dynamic time warping,
and include the timestamps for each word in each segment.
prepend_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the next word
append_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the previous word
initial_prompt: Optional[str]
Optional text to provide as a prompt for the first window. This can be used to provide, or
"prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns
to make it more likely to predict those words correctly.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
clip_timestamps: Union[str, List[float]]
Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process.
The last end timestamp defaults to the end of the file.
hallucination_silence_threshold: Optional[float]
When word_timestamps is True, skip silent periods longer than this threshold (in seconds)
when a possible hallucination is detected
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None.
"""
dtype = mx.float16 if decode_options.get("fp16", True) else mx.float32
model = ModelHolder.get_model(path_or_hf_repo, dtype)
# Pad 30-seconds of silence to the input audio, for slicing
mel = log_mel_spectrogram(audio, n_mels=model.dims.n_mels, padding=N_SAMPLES)
content_frames = mel.shape[-2] - N_FRAMES
content_duration = float(content_frames * HOP_LENGTH / SAMPLE_RATE)
if verbose:
system_encoding = sys.getdefaultencoding()
if system_encoding != "utf-8":
make_safe = lambda x: x.encode(system_encoding, errors="replace").decode(
system_encoding
)
else:
make_safe = lambda x: x
if decode_options.get("language", None) is None:
if not model.is_multilingual:
decode_options["language"] = "en"
else:
if verbose:
print(
"Detecting language using up to the first 30 seconds. "
"Use the `language` decoding option to specify the language"
) | mel_segment = pad_or_trim(mel, N_FRAMES, axis=-2).astype(dtype) | 6 | 2023-12-09 13:33:46+00:00 | 8k |
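The gold completion for the record above calls `pad_or_trim(mel, N_FRAMES, axis=-2)`, whose definition is quoted in the record's context list. As a simplified, NumPy-only sketch of that same pad-or-slice idea (the mlx-specific pad branch from the quoted snippet is deliberately dropped, and the constants are copied or derived from the record's audio.py snippets), the logic is roughly:

import numpy as np

SAMPLE_RATE = 16000
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE  # 480000 samples in a 30-second chunk

def pad_or_trim_np(array: np.ndarray, length: int = N_SAMPLES, *, axis: int = -1) -> np.ndarray:
    # Trim along `axis` when the input is too long.
    if array.shape[axis] > length:
        sl = [slice(None)] * array.ndim
        sl[axis] = slice(0, length)
        array = array[tuple(sl)]
    # Zero-pad on the right when the input is too short.
    if array.shape[axis] < length:
        pad_widths = [(0, 0)] * array.ndim
        pad_widths[axis] = (0, length - array.shape[axis])
        array = np.pad(array, pad_widths)
    return array

# Five seconds of synthetic audio padded out to the 30-second window.
audio = np.random.randn(SAMPLE_RATE * 5).astype(np.float32)
print(pad_or_trim_np(audio).shape)  # (480000,)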
SqueezeAILab/LLMCompiler | src/llm_compiler/planner.py | [
{
"identifier": "Plan",
"path": "src/executors/schema.py",
"snippet": "class Plan(BaseModel):\n \"\"\"Plan.\"\"\"\n\n steps: list[Step]\n \"\"\"The steps.\"\"\""
},
{
"identifier": "END_OF_PLAN",
"path": "src/llm_compiler/constants.py",
"snippet": "END_OF_PLAN = \"<END_OF_PLAN>\""
},
{
"identifier": "ACTION_PATTERN",
"path": "src/llm_compiler/output_parser.py",
"snippet": "ACTION_PATTERN = r\"\\n*(\\d+)\\. (\\w+)\\((.*)\\)(\\s*#\\w+\\n)?\""
},
{
"identifier": "THOUGHT_PATTERN",
"path": "src/llm_compiler/output_parser.py",
"snippet": "THOUGHT_PATTERN = r\"Thought: ([^\\n]*)\""
},
{
"identifier": "LLMCompilerPlanParser",
"path": "src/llm_compiler/output_parser.py",
"snippet": "class LLMCompilerPlanParser(AgentOutputParser, extra=\"allow\"):\n \"\"\"Planning output parser.\"\"\"\n\n def __init__(self, tools: Sequence[Union[Tool, StructuredTool]], **kwargs):\n super().__init__(**kwargs)\n self.tools = tools\n\n def parse(self, text: str) -> list[str]:\n # 1. search(\"Ronaldo number of kids\") -> 1, \"search\", '\"Ronaldo number of kids\"'\n # pattern = r\"(\\d+)\\. (\\w+)\\(([^)]+)\\)\"\n pattern = rf\"(?:{THOUGHT_PATTERN}\\n)?{ACTION_PATTERN}\"\n matches = re.findall(pattern, text)\n\n graph_dict = {}\n\n for match in matches:\n # idx = 1, function = \"search\", args = \"Ronaldo number of kids\"\n # thought will be the preceding thought, if any, otherwise an empty string\n thought, idx, tool_name, args, _ = match\n idx = int(idx)\n\n task = instantiate_task(\n tools=self.tools,\n idx=idx,\n tool_name=tool_name,\n args=args,\n thought=thought,\n )\n\n graph_dict[idx] = task\n if task.is_join:\n break\n\n return graph_dict"
},
{
"identifier": "instantiate_task",
"path": "src/llm_compiler/output_parser.py",
"snippet": "def instantiate_task(\n tools: Sequence[Union[Tool, StructuredTool]],\n idx: int,\n tool_name: str,\n args: str,\n thought: str,\n) -> Task:\n dependencies = _get_dependencies_from_graph(idx, tool_name, args)\n args = _parse_llm_compiler_action_args(args)\n if tool_name == \"join\":\n # join does not have a tool\n tool_func = lambda x: None\n stringify_rule = None\n else:\n tool = _find_tool(tool_name, tools)\n tool_func = tool.func\n stringify_rule = tool.stringify_rule\n return Task(\n idx=idx,\n name=tool_name,\n tool=tool_func,\n args=args,\n dependencies=dependencies,\n stringify_rule=stringify_rule,\n thought=thought,\n is_join=tool_name == \"join\",\n )"
},
{
"identifier": "Task",
"path": "src/llm_compiler/task_fetching_unit.py",
"snippet": "class Task:\n idx: int\n name: str\n tool: Callable\n args: Collection[Any]\n dependencies: Collection[int]\n stringify_rule: Optional[Callable] = None\n thought: Optional[str] = None\n observation: Optional[str] = None\n is_join: bool = False\n\n async def __call__(self) -> Any:\n log(\"running task\")\n x = await self.tool(*self.args)\n log(\"done task\")\n return x\n\n def get_though_action_observation(\n self, include_action=True, include_thought=True, include_action_idx=False\n ) -> str:\n thought_action_observation = \"\"\n if self.thought and include_thought:\n thought_action_observation = f\"Thought: {self.thought}\\n\"\n if include_action:\n idx = f\"{self.idx}. \" if include_action_idx else \"\"\n if self.stringify_rule:\n # If the user has specified a custom stringify rule for the\n # function argument, use it\n thought_action_observation += f\"{idx}{self.stringify_rule(self.args)}\\n\"\n else:\n # Otherwise, we have a default stringify rule\n thought_action_observation += (\n f\"{idx}{self.name}\"\n f\"{_default_stringify_rule_for_arguments(self.args)}\\n\"\n )\n if self.observation is not None:\n thought_action_observation += f\"Observation: {self.observation}\\n\"\n return thought_action_observation"
},
{
"identifier": "StructuredTool",
"path": "src/tools/base.py",
"snippet": "class StructuredTool(BaseTool):\n \"\"\"Tool that can operate on any number of inputs.\"\"\"\n\n description: str = \"\"\n args_schema: Type[BaseModel] = Field(..., description=\"The tool schema.\")\n \"\"\"The input arguments' schema.\"\"\"\n func: Optional[Callable[..., Any]]\n \"\"\"The function to run when the tool is called.\"\"\"\n coroutine: Optional[Callable[..., Awaitable[Any]]] = None\n \"\"\"The asynchronous version of the function.\"\"\"\n stringify_rule: Optional[Callable[..., str]] = None\n\n # --- Runnable ---\n\n async def ainvoke(\n self,\n input: Union[str, Dict],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Any:\n if not self.coroutine:\n # If the tool does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n return super().ainvoke(input, config, **kwargs)\n\n # --- Tool ---\n\n @property\n def args(self) -> dict:\n \"\"\"The tool's input arguments.\"\"\"\n return self.args_schema.schema()[\"properties\"]\n\n def _run(\n self,\n *args: Any,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if self.func:\n new_argument_supported = signature(self.func).parameters.get(\"callbacks\")\n return (\n self.func(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else self.func(*args, **kwargs)\n )\n raise NotImplementedError(\"Tool does not support sync\")\n\n async def _arun(\n self,\n *args: Any,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n if self.coroutine:\n new_argument_supported = signature(self.coroutine).parameters.get(\n \"callbacks\"\n )\n return (\n await self.coroutine(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else await self.coroutine(*args, **kwargs)\n )\n return await asyncio.get_running_loop().run_in_executor(\n None,\n self._run,\n partial(self._run, run_manager=run_manager, **kwargs),\n *args,\n )\n\n @classmethod\n def from_function(\n cls,\n func: Optional[Callable] = None,\n coroutine: Optional[Callable[..., Awaitable[Any]]] = None,\n name: Optional[str] = None,\n description: Optional[str] = None,\n return_direct: bool = False,\n args_schema: Optional[Type[BaseModel]] = None,\n infer_schema: bool = True,\n **kwargs: Any,\n ) -> StructuredTool:\n \"\"\"Create tool from a given function.\n\n A classmethod that helps to create a tool from a function.\n\n Args:\n func: The function from which to create a tool\n coroutine: The async function from which to create a tool\n name: The name of the tool. Defaults to the function name\n description: The description of the tool. Defaults to the function docstring\n return_direct: Whether to return the result directly or as a callback\n args_schema: The schema of the tool's input arguments\n infer_schema: Whether to infer the schema from the function's signature\n **kwargs: Additional arguments to pass to the tool\n\n Returns:\n The tool\n\n Examples:\n\n .. 
code-block:: python\n\n def add(a: int, b: int) -> int:\n \\\"\\\"\\\"Add two numbers\\\"\\\"\\\"\n return a + b\n tool = StructuredTool.from_function(add)\n tool.run(1, 2) # 3\n \"\"\"\n\n if func is not None:\n source_function = func\n elif coroutine is not None:\n source_function = coroutine\n else:\n raise ValueError(\"Function and/or coroutine must be provided\")\n name = name or source_function.__name__\n description = description or source_function.__doc__\n if description is None:\n raise ValueError(\n \"Function must have a docstring if description not provided.\"\n )\n\n # Description example:\n # search_api(query: str) - Searches the API for the query.\n sig = signature(source_function)\n description = f\"{name}{sig} - {description.strip()}\"\n _args_schema = args_schema\n if _args_schema is None and infer_schema:\n _args_schema = create_schema_from_function(f\"{name}Schema\", source_function)\n return cls(\n name=name,\n func=func,\n coroutine=coroutine,\n args_schema=_args_schema,\n description=description,\n return_direct=return_direct,\n **kwargs,\n )"
},
{
"identifier": "Tool",
"path": "src/tools/base.py",
"snippet": "class Tool(BaseTool):\n \"\"\"Tool that takes in function or coroutine directly.\"\"\"\n\n description: str = \"\"\n func: Optional[Callable[..., str]]\n \"\"\"The function to run when the tool is called.\"\"\"\n coroutine: Optional[Callable[..., Awaitable[str]]] = None\n \"\"\"The asynchronous version of the function.\"\"\"\n stringify_rule: Optional[Callable[..., str]] = None\n\n # --- Runnable ---\n\n async def ainvoke(\n self,\n input: Union[str, Dict],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Any:\n if not self.coroutine:\n # If the tool does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n return super().ainvoke(input, config, **kwargs)\n\n # --- Tool ---\n\n @property\n def args(self) -> dict:\n \"\"\"The tool's input arguments.\"\"\"\n if self.args_schema is not None:\n return self.args_schema.schema()[\"properties\"]\n # For backwards compatibility, if the function signature is ambiguous,\n # assume it takes a single string input.\n return {\"tool_input\": {\"type\": \"string\"}}\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n args, kwargs = super()._to_args_and_kwargs(tool_input)\n # For backwards compatibility. The tool must be run with a single input\n all_args = list(args) + list(kwargs.values())\n if len(all_args) != 1:\n raise ToolException(\n f\"Too many arguments to single-input tool {self.name}.\"\n f\" Args: {all_args}\"\n )\n return tuple(all_args), {}\n\n def _run(\n self,\n *args: Any,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if self.func:\n new_argument_supported = signature(self.func).parameters.get(\"callbacks\")\n return (\n self.func(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else self.func(*args, **kwargs)\n )\n raise NotImplementedError(\"Tool does not support sync\")\n\n async def _arun(\n self,\n *args: Any,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool asynchronously.\"\"\"\n if self.coroutine:\n new_argument_supported = signature(self.coroutine).parameters.get(\n \"callbacks\"\n )\n return (\n await self.coroutine(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else await self.coroutine(*args, **kwargs)\n )\n else:\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self._run, run_manager=run_manager, **kwargs), *args\n )\n\n # TODO: this is for backwards compatibility, remove in future\n def __init__(\n self, name: str, func: Optional[Callable], description: str, **kwargs: Any\n ) -> None:\n \"\"\"Initialize tool.\"\"\"\n super(Tool, self).__init__(\n name=name, func=func, description=description, **kwargs\n )\n\n @classmethod\n def from_function(\n cls,\n func: Optional[Callable],\n name: str, # We keep these required to support backwards compatibility\n description: str,\n return_direct: bool = False,\n args_schema: Optional[Type[BaseModel]] = None,\n coroutine: Optional[\n Callable[..., Awaitable[Any]]\n ] = None, # This is last for compatibility, but should be after func\n **kwargs: Any,\n ) -> Tool:\n \"\"\"Initialize tool from a function.\"\"\"\n if func is None and coroutine is None:\n 
raise ValueError(\"Function and/or coroutine must be provided\")\n return cls(\n name=name,\n func=func,\n coroutine=coroutine,\n description=description,\n return_direct=return_direct,\n args_schema=args_schema,\n **kwargs,\n )"
},
{
"identifier": "log",
"path": "src/utils/logger_utils.py",
"snippet": "def log(self, latency: float, answer: str, label: str, key: str) -> None:\n self._latency_dict[key].append(latency)\n self._answer_dict[key].append(answer)\n self._label_dict[key].append(label)"
}
] | import asyncio
import re
from typing import Any, Optional, Sequence, Union
from uuid import UUID
from langchain.callbacks.base import AsyncCallbackHandler, Callbacks
from langchain.chat_models.base import BaseChatModel
from langchain.schema import LLMResult
from langchain.schema.messages import HumanMessage, SystemMessage
from src.executors.schema import Plan
from src.llm_compiler.constants import END_OF_PLAN
from src.llm_compiler.output_parser import (
ACTION_PATTERN,
THOUGHT_PATTERN,
LLMCompilerPlanParser,
instantiate_task,
)
from src.llm_compiler.task_fetching_unit import Task
from src.tools.base import StructuredTool, Tool
from src.utils.logger_utils import log | 5,415 | args=args,
thought=self.thought,
)
self.buffer = suffix
self.thought = ""
return task
return None
def ingest_token(self, token: str) -> Optional[Task]:
# Append token to buffer
if "\n" in token:
prefix, suffix = token.split("\n", 1)
prefix = prefix.strip()
self.buffer += prefix + "\n"
return self._match_buffer_and_generate_task(suffix)
else:
self.buffer += token
return None
def finalize(self):
self.buffer = self.buffer + "\n"
return self._match_buffer_and_generate_task("")
class LLMCompilerCallback(AsyncCallbackHandler):
_queue: asyncio.Queue[Optional[Task]]
_parser: StreamingGraphParser
_tools: Sequence[Union[Tool, StructuredTool]]
def __init__(
self,
queue: asyncio.Queue[Optional[str]],
tools: Sequence[Union[Tool, StructuredTool]],
):
self._queue = queue
self._parser = StreamingGraphParser(tools=tools)
async def on_llm_start(self, serialized, prompts, **kwargs: Any) -> Any:
"""Run when LLM starts running."""
async def on_llm_new_token(
self,
token: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
parsed_data = self._parser.ingest_token(token)
if parsed_data:
await self._queue.put(parsed_data)
if parsed_data.is_join:
await self._queue.put(None)
async def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
parsed_data = self._parser.finalize()
if parsed_data:
await self._queue.put(parsed_data)
await self._queue.put(None)
class Planner:
def __init__(
self,
llm: BaseChatModel,
example_prompt: str,
example_prompt_replan: str,
tools: Sequence[Union[Tool, StructuredTool]],
stop: Optional[list[str]],
):
self.llm = llm
# different system prompt is needed when replanning
# since they have different guidelines, and also examples provided by the user
self.system_prompt = generate_llm_compiler_prompt(
tools=tools,
example_prompt=example_prompt,
is_replan=False,
)
self.system_prompt_replan = generate_llm_compiler_prompt(
tools=tools,
example_prompt=example_prompt_replan,
is_replan=True,
)
self.tools = tools
self.output_parser = LLMCompilerPlanParser(tools=tools)
self.stop = stop
async def run_llm(
self,
inputs: dict[str, Any],
is_replan: bool = False,
callbacks: Callbacks = None,
) -> str:
"""Run the LLM."""
if is_replan:
system_prompt = self.system_prompt_replan
assert "context" in inputs, "If replanning, context must be provided"
human_prompt = f"Question: {inputs['input']}\n{inputs['context']}\n"
else:
system_prompt = self.system_prompt
human_prompt = f"Question: {inputs['input']}"
messages = [
SystemMessage(content=system_prompt),
HumanMessage(content=human_prompt),
]
llm_response = await self.llm._call_async(
messages,
callbacks=callbacks,
stop=self.stop,
)
| """LLM Compiler Planner"""
JOIN_DESCRIPTION = (
"join():\n"
" - Collects and combines results from prior actions.\n"
" - A LLM agent is called upon invoking join to either finalize the user query or wait until the plans are executed.\n"
" - join should always be the last action in the plan, and will be called in two scenarios:\n"
" (a) if the answer can be determined by gathering the outputs from tasks to generate the final response.\n"
" (b) if the answer cannot be determined in the planning phase before you execute the plans. "
)
def generate_llm_compiler_prompt(
tools: Sequence[Union[Tool, StructuredTool]],
example_prompt=str,
is_replan: bool = False,
):
prefix = (
"Given a user query, create a plan to solve it with the utmost parallelizability. "
f"Each plan should comprise an action from the following {len(tools) + 1} types:\n"
)
# Tools
for i, tool in enumerate(tools):
prefix += f"{i+1}. {tool.description}\n"
# Join operation
prefix += f"{i+2}. {JOIN_DESCRIPTION}\n\n"
# Guidelines
prefix += (
"Guidelines:\n"
" - Each action described above contains input/output types and description.\n"
" - You must strictly adhere to the input and output types for each action.\n"
" - The action descriptions contain the guidelines. You MUST strictly follow those guidelines when you use the actions.\n"
" - Each action in the plan should strictly be one of the above types. Follow the Python conventions for each action.\n"
" - Each action MUST have a unique ID, which is strictly increasing.\n"
" - Inputs for actions can either be constants or outputs from preceding actions. "
"In the latter case, use the format $id to denote the ID of the previous action whose output will be the input.\n"
f" - Always call join as the last action in the plan. Say '{END_OF_PLAN}' after you call join\n"
" - Ensure the plan maximizes parallelizability.\n"
" - Only use the provided action types. If a query cannot be addressed using these, invoke the join action for the next steps.\n"
" - Never explain the plan with comments (e.g. #).\n"
" - Never introduce new actions other than the ones provided.\n\n"
)
if is_replan:
prefix += (
' - You are given "Previous Plan" which is the plan that the previous agent created along with the execution results '
"(given as Observation) of each plan and a general thought (given as Thought) about the executed results."
'You MUST use these information to create the next plan under "Current Plan".\n'
' - When starting the Current Plan, you should start with "Thought" that outlines the strategy for the next plan.\n'
" - In the Current Plan, you should NEVER repeat the actions that are already executed in the Previous Plan.\n"
)
# Examples
prefix += "Here are some examples:\n\n"
prefix += example_prompt
return prefix
class StreamingGraphParser:
"""Streaming version of the GraphParser."""
buffer = ""
thought = ""
graph_dict = {}
def __init__(self, tools: Sequence[Union[Tool, StructuredTool]]) -> None:
self.tools = tools
def _match_buffer_and_generate_task(self, suffix: str) -> Optional[Task]:
"""Runs every time "\n" is encountered in the input stream or at the end of the stream.
Matches the buffer against the regex patterns and generates a task if a match is found.
Match patterns include:
1. Thought: <thought>
- this case, the thought is stored in self.thought, and we reset the buffer.
- the thought is then used as the thought for the next action.
2. <idx>. <tool_name>(<args>)
- this case, the tool is instantiated with the idx, tool_name, args, and thought.
- the thought is reset.
- the buffer is reset.
"""
if match := re.match(THOUGHT_PATTERN, self.buffer):
# Optionally, action can be preceded by a thought
self.thought = match.group(1)
self.buffer = suffix
elif match := re.match(ACTION_PATTERN, self.buffer):
# if action is parsed, return the task, and clear the buffer
idx, tool_name, args, _ = match.groups()
idx = int(idx)
task = instantiate_task(
tools=self.tools,
idx=idx,
tool_name=tool_name,
args=args,
thought=self.thought,
)
self.buffer = suffix
self.thought = ""
return task
return None
def ingest_token(self, token: str) -> Optional[Task]:
# Append token to buffer
if "\n" in token:
prefix, suffix = token.split("\n", 1)
prefix = prefix.strip()
self.buffer += prefix + "\n"
return self._match_buffer_and_generate_task(suffix)
else:
self.buffer += token
return None
def finalize(self):
self.buffer = self.buffer + "\n"
return self._match_buffer_and_generate_task("")
class LLMCompilerCallback(AsyncCallbackHandler):
_queue: asyncio.Queue[Optional[Task]]
_parser: StreamingGraphParser
_tools: Sequence[Union[Tool, StructuredTool]]
def __init__(
self,
queue: asyncio.Queue[Optional[str]],
tools: Sequence[Union[Tool, StructuredTool]],
):
self._queue = queue
self._parser = StreamingGraphParser(tools=tools)
async def on_llm_start(self, serialized, prompts, **kwargs: Any) -> Any:
"""Run when LLM starts running."""
async def on_llm_new_token(
self,
token: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
parsed_data = self._parser.ingest_token(token)
if parsed_data:
await self._queue.put(parsed_data)
if parsed_data.is_join:
await self._queue.put(None)
async def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
parsed_data = self._parser.finalize()
if parsed_data:
await self._queue.put(parsed_data)
await self._queue.put(None)
class Planner:
def __init__(
self,
llm: BaseChatModel,
example_prompt: str,
example_prompt_replan: str,
tools: Sequence[Union[Tool, StructuredTool]],
stop: Optional[list[str]],
):
self.llm = llm
        # a different system prompt is needed when replanning,
        # since replanning has different guidelines and its own user-provided examples
self.system_prompt = generate_llm_compiler_prompt(
tools=tools,
example_prompt=example_prompt,
is_replan=False,
)
self.system_prompt_replan = generate_llm_compiler_prompt(
tools=tools,
example_prompt=example_prompt_replan,
is_replan=True,
)
self.tools = tools
self.output_parser = LLMCompilerPlanParser(tools=tools)
self.stop = stop
async def run_llm(
self,
inputs: dict[str, Any],
is_replan: bool = False,
callbacks: Callbacks = None,
) -> str:
"""Run the LLM."""
if is_replan:
system_prompt = self.system_prompt_replan
assert "context" in inputs, "If replanning, context must be provided"
human_prompt = f"Question: {inputs['input']}\n{inputs['context']}\n"
else:
system_prompt = self.system_prompt
human_prompt = f"Question: {inputs['input']}"
messages = [
SystemMessage(content=system_prompt),
HumanMessage(content=human_prompt),
]
llm_response = await self.llm._call_async(
messages,
callbacks=callbacks,
stop=self.stop,
) | log("LLMCompiler planner response: \n", llm_response.content, block=True) | 9 | 2023-12-06 21:12:54+00:00 | 8k |
open-compass/MixtralKit | mixtralkit/layers/moe.py | [
{
"identifier": "ModelArgs",
"path": "mixtralkit/layers/utils.py",
"snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2\n ffn_dim_multiplier: Optional[float] = None\n norm_eps: float = 1e-5\n\n max_batch_size: int = 32\n max_seq_len: int = 2048"
},
{
"identifier": "TorchAttention",
"path": "mixtralkit/layers/attention.py",
"snippet": "class TorchAttention(nn.Module):\n \"\"\"Multi-head attention module.\"\"\"\n def __init__(self, args: ModelArgs):\n \"\"\"\n Initialize the Attention module.\n\n Args:\n args (ModelArgs): Model configuration parameters.\n\n Attributes:\n n_kv_heads (int): Number of key and value heads.\n n_local_heads (int): Number of local query heads.\n n_local_kv_heads (int): Number of local key and value heads.\n n_rep (int): Number of repetitions for local heads.\n head_dim (int): Dimension size of each attention head.\n wq (ColumnParallelLinear): Linear transformation for queries.\n wk (ColumnParallelLinear): Linear transformation for keys.\n wv (ColumnParallelLinear): Linear transformation for values.\n wo (RowParallelLinear): Linear transformation for output.\n cache_k (torch.Tensor): Cached keys for attention.\n cache_v (torch.Tensor): Cached values for attention.\n\n \"\"\"\n \n\n super().__init__()\n self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads\n model_parallel_size = 1\n self.n_local_heads = args.n_heads // model_parallel_size\n self.n_local_kv_heads = self.n_kv_heads // model_parallel_size\n self.n_rep = self.n_local_heads // self.n_local_kv_heads\n self.head_dim = args.dim // args.n_heads\n\n self.wq = nn.Linear(\n args.dim,\n args.n_heads * self.head_dim,\n bias=False,\n )\n self.wk = nn.Linear(\n args.dim,\n self.n_kv_heads * self.head_dim,\n bias=False,\n )\n self.wv = nn.Linear(\n args.dim,\n self.n_kv_heads * self.head_dim,\n bias=False,\n )\n self.wo = nn.Linear(\n args.n_heads * self.head_dim,\n args.dim,\n bias=False,\n )\n\n self.cache_k = torch.zeros(\n (\n args.max_batch_size,\n args.max_seq_len,\n self.n_local_kv_heads,\n self.head_dim,\n )\n ).cuda()\n self.cache_v = torch.zeros(\n (\n args.max_batch_size,\n args.max_seq_len,\n self.n_local_kv_heads,\n self.head_dim,\n )\n ).cuda()\n\n def forward(\n self,\n x: torch.Tensor,\n start_pos: int,\n freqs_cis: torch.Tensor,\n mask: Optional[torch.Tensor],\n ):\n \"\"\"\n Forward pass of the attention module.\n\n Args:\n x (torch.Tensor): Input tensor.\n start_pos (int): Starting position for caching.\n freqs_cis (torch.Tensor): Precomputed frequency tensor.\n mask (torch.Tensor, optional): Attention mask tensor.\n\n Returns:\n torch.Tensor: Output tensor after attention.\n\n \"\"\"\n bsz, seqlen, _ = x.shape\n xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)\n\n xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)\n xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)\n xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)\n\n xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)\n\n self.cache_k = self.cache_k.to(xq)\n self.cache_v = self.cache_v.to(xq)\n\n self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk\n self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv\n\n keys = self.cache_k[:bsz, : start_pos + seqlen]\n values = self.cache_v[:bsz, : start_pos + seqlen]\n\n # repeat k/v heads if n_kv_heads < n_heads\n keys = repeat_kv(keys, self.n_rep) # (bs, cache_len + seqlen, n_local_heads, head_dim)\n values = repeat_kv(values, self.n_rep) # (bs, cache_len + seqlen, n_local_heads, head_dim)\n\n xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)\n keys = keys.transpose(1, 2) # (bs, n_local_heads, cache_len + seqlen, head_dim)\n values = values.transpose(1, 2) # (bs, n_local_heads, cache_len + seqlen, head_dim)\n scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)\n if mask is not None:\n scores = scores + mask # (bs, 
n_local_heads, seqlen, cache_len + seqlen)\n scores = F.softmax(scores.float(), dim=-1).type_as(xq)\n output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim)\n output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)\n return self.wo(output)"
},
{
"identifier": "FairScaleAttention",
"path": "mixtralkit/layers/attention.py",
"snippet": "class FairScaleAttention(TorchAttention):\n \"\"\"Multi-head attention module.\n \n Modified from \n \"\"\"\n def __init__(self, args: ModelArgs):\n \"\"\"\n Initialize the Attention module.\n\n Args:\n args (ModelArgs): Model configuration parameters.\n\n Attributes:\n n_kv_heads (int): Number of key and value heads.\n n_local_heads (int): Number of local query heads.\n n_local_kv_heads (int): Number of local key and value heads.\n n_rep (int): Number of repetitions for local heads.\n head_dim (int): Dimension size of each attention head.\n wq (ColumnParallelLinear): Linear transformation for queries.\n wk (ColumnParallelLinear): Linear transformation for keys.\n wv (ColumnParallelLinear): Linear transformation for values.\n wo (RowParallelLinear): Linear transformation for output.\n cache_k (torch.Tensor): Cached keys for attention.\n cache_v (torch.Tensor): Cached values for attention.\n\n \"\"\"\n import fairscale.nn.model_parallel.initialize as fs_init\n from fairscale.nn.model_parallel.layers import (\n ColumnParallelLinear,\n RowParallelLinear,\n )\n\n super().__init__(args)\n self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads\n model_parallel_size = fs_init.get_model_parallel_world_size()\n self.n_local_heads = args.n_heads // model_parallel_size\n self.n_local_kv_heads = self.n_kv_heads // model_parallel_size\n self.n_rep = self.n_local_heads // self.n_local_kv_heads\n self.head_dim = args.dim // args.n_heads\n\n self.wq = ColumnParallelLinear(\n args.dim,\n args.n_heads * self.head_dim,\n bias=False,\n gather_output=False,\n init_method=lambda x: x,\n )\n self.wk = ColumnParallelLinear(\n args.dim,\n self.n_kv_heads * self.head_dim,\n bias=False,\n gather_output=False,\n init_method=lambda x: x,\n )\n self.wv = ColumnParallelLinear(\n args.dim,\n self.n_kv_heads * self.head_dim,\n bias=False,\n gather_output=False,\n init_method=lambda x: x,\n )\n self.wo = RowParallelLinear(\n args.n_heads * self.head_dim,\n args.dim,\n bias=False,\n input_is_parallel=True,\n init_method=lambda x: x,\n )\n\n self.cache_k = torch.zeros(\n (\n args.max_batch_size,\n args.max_seq_len,\n self.n_local_kv_heads,\n self.head_dim,\n )\n ).cuda()\n self.cache_v = torch.zeros(\n (\n args.max_batch_size,\n args.max_seq_len,\n self.n_local_kv_heads,\n self.head_dim,\n )\n ).cuda()"
},
{
"identifier": "TorchFFN",
"path": "mixtralkit/layers/ffn.py",
"snippet": "class TorchFFN(nn.Module):\n def __init__(\n self,\n dim: int,\n hidden_dim: int,\n ):\n \"\"\"\n Initialize the FeedForward module.\n\n Args:\n dim (int): Input dimension.\n hidden_dim (int): Hidden dimension of the feedforward layer.\n multiple_of (int): Value to ensure hidden dimension is a multiple of this value.\n ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. Defaults to None.\n\n Attributes:\n w1 (ColumnParallelLinear): Linear transformation for the first layer.\n w2 (RowParallelLinear): Linear transformation for the second layer.\n w3 (ColumnParallelLinear): Linear transformation for the third layer.\n\n \"\"\"\n super().__init__()\n\n self.w1 = nn.Linear(\n dim, hidden_dim, bias=False\n )\n self.w2 = nn.Linear(\n hidden_dim, dim, bias=False\n )\n self.w3 = nn.Linear(\n dim, hidden_dim, bias=False\n )\n\n def forward(self, x):\n device = x.device\n x = x.to(self.w1.weight.device)\n return self.w2(F.silu(self.w1(x)) * self.w3(x)).to(device)"
},
{
"identifier": "FairScaleFFN",
"path": "mixtralkit/layers/ffn.py",
"snippet": "class FairScaleFFN(nn.Module):\n def __init__(\n self,\n dim: int,\n hidden_dim: int,\n multiple_of: int,\n ffn_dim_multiplier: Optional[float],\n ):\n \"\"\"\n Initialize the FeedForward module.\n\n Args:\n dim (int): Input dimension.\n hidden_dim (int): Hidden dimension of the feedforward layer.\n multiple_of (int): Value to ensure hidden dimension is a multiple of this value.\n ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. Defaults to None.\n\n Attributes:\n w1 (ColumnParallelLinear): Linear transformation for the first layer.\n w2 (RowParallelLinear): Linear transformation for the second layer.\n w3 (ColumnParallelLinear): Linear transformation for the third layer.\n\n \"\"\"\n super().__init__()\n hidden_dim = int(2 * hidden_dim / 3)\n # custom dim factor multiplier\n if ffn_dim_multiplier is not None:\n hidden_dim = int(ffn_dim_multiplier * hidden_dim)\n hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)\n\n from fairscale.nn.model_parallel.layers import (\n ColumnParallelLinear,\n RowParallelLinear,\n )\n self.w1 = ColumnParallelLinear(\n dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x\n )\n self.w2 = RowParallelLinear(\n hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x\n )\n self.w3 = ColumnParallelLinear(\n dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x\n )\n\n def forward(self, x):\n return self.w2(F.silu(self.w1(x)) * self.w3(x))"
},
{
"identifier": "TorchTransformerBlock",
"path": "mixtralkit/layers/transformer.py",
"snippet": "class TorchTransformerBlock(nn.Module):\n def __init__(self, layer_id: int, args: ModelArgs):\n \"\"\"\n Initialize a TransformerBlock.\n\n Args:\n layer_id (int): Identifier for the layer.\n args (ModelArgs): Model configuration parameters.\n\n Attributes:\n n_heads (int): Number of attention heads.\n dim (int): Dimension size of the model.\n head_dim (int): Dimension size of each attention head.\n attention (Attention): Attention module.\n feed_forward (FeedForward): FeedForward module.\n layer_id (int): Identifier for the layer.\n attention_norm (RMSNorm): Layer normalization for attention output.\n ffn_norm (RMSNorm): Layer normalization for feedforward output.\n\n \"\"\"\n super().__init__()\n self.n_heads = args.n_heads\n self.dim = args.dim\n self.head_dim = args.dim // args.n_heads\n self.attention = TorchAttention(args)\n self.feed_forward = TorchFFN(\n dim=args.dim,\n hidden_dim=4 * args.dim,\n )\n self.layer_id = layer_id\n self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)\n self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)\n\n def forward(\n self,\n x: torch.Tensor,\n start_pos: int,\n freqs_cis: torch.Tensor,\n mask: Optional[torch.Tensor],\n ):\n \"\"\"\n Perform a forward pass through the TransformerBlock.\n\n Args:\n x (torch.Tensor): Input tensor.\n start_pos (int): Starting position for attention caching.\n freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.\n mask (torch.Tensor, optional): Masking tensor for attention. Defaults to None.\n\n Returns:\n torch.Tensor: Output tensor after applying attention and feedforward layers.\n\n \"\"\"\n h = x + self.attention.forward(\n self.attention_norm(x), start_pos, freqs_cis, mask\n )\n out = h + self.feed_forward.forward(self.ffn_norm(h))\n return out"
},
{
"identifier": "TorchTransformer",
"path": "mixtralkit/layers/transformer.py",
"snippet": "class TorchTransformer(nn.Module):\n def __init__(self, params: ModelArgs):\n \"\"\"\n Initialize a Transformer model.\n\n Args:\n params (ModelArgs): Model configuration parameters.\n\n Attributes:\n params (ModelArgs): Model configuration parameters.\n vocab_size (int): Vocabulary size.\n n_layers (int): Number of layers in the model.\n tok_embeddings (ParallelEmbedding): Token embeddings.\n layers (torch.nn.ModuleList): List of Transformer blocks.\n norm (RMSNorm): Layer normalization for the model output.\n output (ColumnParallelLinear): Linear layer for final output.\n freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.\n\n \"\"\"\n super().__init__()\n self.params = params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n self.tok_embeddings = nn.Embedding(\n params.vocab_size, params.dim\n )\n\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TorchTransformerBlock(layer_id, params))\n\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = nn.Linear(\n params.dim, params.vocab_size, bias=False\n )\n self.freqs_cis = precompute_freqs_cis(\n # Note that self.params.max_seq_len is multiplied by 2 because the token limit for the Llama 2 generation of models is 4096.\n # Adding this multiplier instead of using 4096 directly allows for dynamism of token lengths while training or fine-tuning.\n dim=self.params.dim // self.params.n_heads,\n end=self.params.max_seq_len * 2,\n theta=self.params.rope_theta,\n )\n\n @torch.inference_mode()\n def forward(self, tokens: torch.Tensor, start_pos: int):\n \"\"\"\n Perform a forward pass through the Transformer model.\n\n Args:\n tokens (torch.Tensor): Input token indices.\n start_pos (int): Starting position for attention caching.\n\n Returns:\n torch.Tensor: Output logits after applying the Transformer model.\n\n \"\"\"\n _bsz, seqlen = tokens.shape\n h = self.tok_embeddings(tokens)\n self.freqs_cis = self.freqs_cis.to(h.device)\n freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]\n\n mask = None\n if seqlen > 1:\n mask = torch.full(\n (seqlen, seqlen), float(\"-inf\"), device=tokens.device\n )\n\n mask = torch.triu(mask, diagonal=1)\n\n # When performing key-value caching, we compute the attention scores\n # only for the new sequence. Thus, the matrix of scores is of size\n # (seqlen, cache_len + seqlen), and the only masked entries are (i, j) for\n # j > cache_len + i, since row i corresponds to token cache_len + i.\n mask = torch.hstack([\n torch.zeros((seqlen, start_pos), device=tokens.device),\n mask\n ]).type_as(h)\n\n for layer in self.layers:\n h = layer(h, start_pos, freqs_cis, mask)\n h = self.norm(h)\n output = self.output(h).float()\n return output"
},
{
"identifier": "FairScaleTransformer",
"path": "mixtralkit/layers/transformer.py",
"snippet": "class FairScaleTransformer(TorchTransformer):\n def __init__(self, params: ModelArgs):\n \"\"\"\n Initialize a Transformer model.\n\n Args:\n params (ModelArgs): Model configuration parameters.\n\n Attributes:\n params (ModelArgs): Model configuration parameters.\n vocab_size (int): Vocabulary size.\n n_layers (int): Number of layers in the model.\n tok_embeddings (ParallelEmbedding): Token embeddings.\n layers (torch.nn.ModuleList): List of Transformer blocks.\n norm (RMSNorm): Layer normalization for the model output.\n output (ColumnParallelLinear): Linear layer for final output.\n freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.\n\n \"\"\"\n super().__init__()\n self.params = params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n from fairscale.nn.model_parallel.layers import (\n ColumnParallelLinear,\n ParallelEmbedding,\n )\n self.tok_embeddings = ParallelEmbedding(\n params.vocab_size, params.dim, init_method=lambda x: x\n )\n\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(FairScaleTransformerBlock(layer_id, params))\n\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = ColumnParallelLinear(\n params.dim, params.vocab_size, bias=False, init_method=lambda x: x\n )\n\n self.freqs_cis = precompute_freqs_cis(\n # Note that self.params.max_seq_len is multiplied by 2 because the token limit for the Llama 2 generation of models is 4096.\n # Adding this multiplier instead of using 4096 directly allows for dynamism of token lengths while training or fine-tuning.\n dim=self.params.dim // self.params.n_heads,\n end=self.params.max_seq_len * 2,\n theta=self.params.rope_theta,\n )"
}
] | import math
import torch
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
from torch import nn
from .utils import ModelArgs
from .attention import TorchAttention, FairScaleAttention
from .ffn import TorchFFN, FairScaleFFN
from .transformer import TorchTransformerBlock, TorchTransformer, FairScaleTransformer
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
) | 5,169 | # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class MoETorchFFN(nn.Module):
def __init__(
self,
num_experts: int,
num_experts_per_tok: int,
num_shards: int,
gate_softmax: bool = False,
**kwargs,
):
super().__init__()
self.experts = nn.ModuleList([
TorchFFN(**kwargs).to(f"cuda:{i//num_shards}")
for i in range(num_experts)]
)
self.gate = nn.Linear(
kwargs["dim"], num_experts, bias=False)
self.num_experts_per_tok = num_experts_per_tok
self.gate_softmax = gate_softmax
print("Softmax for Gate:{}".format(str(gate_softmax)))
def forward(self, x):
orig_shape = x.shape
x = x.view(-1, x.shape[-1])
if self.gate_softmax:
scores = self.gate(x).softmax(dim=-1)
else:
scores = self.gate(x)
expert_weights, expert_indices = torch.topk(
scores, self.num_experts_per_tok, dim=-1)
expert_weights = expert_weights.softmax(dim=-1)
flat_expert_indices = expert_indices.view(-1)
x = x.repeat_interleave(self.num_experts_per_tok, dim=0)
y = torch.empty_like(x)
for i, expert in enumerate(self.experts):
y[flat_expert_indices == i] = expert(x[flat_expert_indices == i])
y = (y.view(*expert_weights.shape, -1) * expert_weights.unsqueeze(-1)).sum(dim=1)
return y.view(*orig_shape)
| # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class MoETorchFFN(nn.Module):
def __init__(
self,
num_experts: int,
num_experts_per_tok: int,
num_shards: int,
gate_softmax: bool = False,
**kwargs,
):
super().__init__()
self.experts = nn.ModuleList([
TorchFFN(**kwargs).to(f"cuda:{i//num_shards}")
for i in range(num_experts)]
)
self.gate = nn.Linear(
kwargs["dim"], num_experts, bias=False)
self.num_experts_per_tok = num_experts_per_tok
self.gate_softmax = gate_softmax
print("Softmax for Gate:{}".format(str(gate_softmax)))
def forward(self, x):
orig_shape = x.shape
x = x.view(-1, x.shape[-1])
if self.gate_softmax:
scores = self.gate(x).softmax(dim=-1)
else:
scores = self.gate(x)
expert_weights, expert_indices = torch.topk(
scores, self.num_experts_per_tok, dim=-1)
expert_weights = expert_weights.softmax(dim=-1)
flat_expert_indices = expert_indices.view(-1)
x = x.repeat_interleave(self.num_experts_per_tok, dim=0)
y = torch.empty_like(x)
for i, expert in enumerate(self.experts):
y[flat_expert_indices == i] = expert(x[flat_expert_indices == i])
y = (y.view(*expert_weights.shape, -1) * expert_weights.unsqueeze(-1)).sum(dim=1)
return y.view(*orig_shape)
| class MoETorchTransformerBlock(TorchTransformerBlock): | 5 | 2023-12-09 15:05:26+00:00 | 8k |
aymenfurter/microagents | agents/agent_lifecycle.py | [
{
"identifier": "MicroAgent",
"path": "agents/microagent.py",
"snippet": "class MicroAgent:\n \"\"\"\n The MicroAgent class encapsulates the behavior of a small, purpose-driven agent\n that interacts with the OpenAI API.\n \"\"\"\n\n def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None) :\n self.dynamic_prompt = initial_prompt\n self.purpose = purpose\n self.purpose_embedding = purpose_embedding \n self.depth = depth\n self.max_depth = max_depth\n self.usage_count = 0\n self.working_agent = bootstrap_agent\n self.agent_lifecycle = agent_lifecycle\n self.openai_wrapper = openai_wrapper\n self.evolve_count = 0\n self.number_of_code_executions = 0 \n self.current_status = None\n self.active_agents = {} \n self.last_input = \"\"\n self.last_output = \"\"\n self.last_conversation = \"\"\n self.stopped = False\n self.is_prime = is_prime\n\n # Initialize components used by the agent\n self.agent_evaluator = AgentEvaluator(self.openai_wrapper)\n self.code_executor = CodeExecution()\n self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)\n self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)\n self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)\n self.response_extractor = ResponseExtraction(self.openai_wrapper)\n self.response_handler = ResponseHandler(self)\n\n def update_status(self, status):\n \"\"\"Update the agent's current status.\"\"\"\n self.check_for_stopped()\n self.current_status = status\n logger.info(f\"Agent {self.purpose} status updated to: {status}\")\n\n def update_active_agents(self, calling_agent, called_agent=None):\n \"\"\"Update the tree view of active agents.\"\"\"\n if called_agent:\n self.active_agents[calling_agent] = called_agent\n else:\n self.active_agents.pop(calling_agent, None)\n logger.info(f\"Active agents updated: {self.active_agents}\")\n\n def set_agent_as_working(self):\n \"\"\"Set the agent as a working agent.\"\"\"\n self.working_agent = True\n self.agent_lifecycle.save_agent(self)\n logger.info(f\"Agent {self.purpose} set as working agent.\")\n\n def is_working_agent(self):\n return self.working_agent\n\n def set_agent_deleted(self): \n \"\"\"Set the agent as deleted.\"\"\"\n self.working_agent = False\n self.current_status = \"❌ Deleted\"\n self.stopped = True\n logger.info(f\"Agent {self.purpose} set as deleted.\")\n\n def check_for_stopped(self):\n \"\"\"Check if the agent has been stopped.\"\"\"\n if self.stopped:\n raise AgentStoppedException(\"Agent stopped.\")\n\n def respond(self, input_text, evolve_count=0):\n \"\"\"\n Generate a response to the given input text.\n \"\"\"\n return self.response_handler.respond(input_text, evolve_count)"
},
{
"identifier": "OpenAIAPIWrapper",
"path": "integrations/openaiwrapper.py",
"snippet": "class OpenAIAPIWrapper:\n \"\"\"\n A wrapper class for OpenAI's API.\n \"\"\"\n\n def __init__(self, api_key, timeout=10):\n \"\"\"\n Initializes the OpenAIAPIWrapper instance.\n\n :param api_key: The API key for OpenAI.\n :param timeout: The timeout duration in seconds for API requests.\n \"\"\"\n self.api_key = api_key\n openai.api_key = api_key\n if API_BASE is not None:\n logging.debug(\"Accessing OPENAI at %s\" % API_BASE)\n openai.api_base = API_BASE\n self.timeout = timeout\n\n @memoize_to_sqlite(func_name=\"get_embedding\", filename=\"openai_embedding_cache.db\")\n def get_embedding(self, text):\n \"\"\"\n Retrieves the embedding for the given text.\n\n :param text: The text for which embedding is required.\n :return: The embedding for the given text.\n \"\"\"\n start_time = time.time()\n retries = 0\n\n while time.time() - start_time < self.timeout:\n try:\n return openai.Embedding.create(input=text, engine=ENGINE)\n except openai.error.OpenAIError as e:\n logging.error(f\"OpenAI API error: {e}\")\n retries += 1\n if retries >= MAX_RETRIES:\n raise\n time.sleep(RETRY_SLEEP_DURATION)\n\n if f\"{e}\".startswith(\"Rate limit\"):\n print(\"Rate limit reached... sleeping for 20 seconds\")\n start_time+=20\n time.sleep(20)\n raise TimeoutError(\"API call timed out\")\n\n def chat_completion(self, **kwargs):\n \"\"\"\n Generates a chat completion using OpenAI's API.\n\n :param kwargs: Keyword arguments for the chat completion API call.\n :return: The result of the chat completion API call.\n \"\"\"\n\n if 'model' not in kwargs:\n kwargs['model']=MODEL\n\n start_time = time.time()\n retries = 0\n\n while time.time() - start_time < self.timeout:\n try:\n res=openai.ChatCompletion.create(**kwargs)\n if isinstance(res, dict):\n if isinstance(res['choices'][0], dict):\n return res['choices'][0]['message']['content'].strip()\n return res['choices'][0].message['content'].strip()\n return res.choices[0].message['content'].strip()\n except openai.error.OpenAIError as e:\n logging.error(f\"OpenAI API error: {e}\")\n retries += 1\n if retries >= MAX_RETRIES:\n raise\n time.sleep(RETRY_SLEEP_DURATION)\n\n if f\"{e}\".startswith(\"Rate limit\"):\n print(\"Rate limit reached... sleeping for 20 seconds\")\n start_time+=20\n time.sleep(20)\n raise TimeoutError(\"API call timed out\")"
},
{
"identifier": "AgentSimilarity",
"path": "agents/agent_similarity.py",
"snippet": "class AgentSimilarity:\n def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):\n \"\"\"\n Initializes the AgentSimilarity object.\n\n :param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.\n :param agents: List of Agent objects.\n \"\"\"\n self.openai_wrapper = openai_wrapper\n self.agents = agents\n\n def get_embedding(self, text: str) -> np.ndarray:\n \"\"\"\n Retrieves the embedding for a given text.\n\n :param text: Text to get embedding for.\n :return: Embedding as a numpy array.\n \"\"\"\n try:\n response = self.openai_wrapper.get_embedding(text)\n if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:\n return np.array(response['data'][0]['embedding'])\n else:\n logger.exception(\"Invalid response format\")\n raise ValueError(\"Invalid response format\")\n except Exception as e:\n logger.exception(f\"Error retrieving embedding: {e}\")\n raise ValueError(f\"Error retrieving embedding: {e}\")\n\n\n def calculate_similarity_threshold(self) -> float:\n \"\"\"\n Calculates the 98th percentile of the similarity threshold across all agents.\n\n :return: 98th percentile of similarity threshold.\n \"\"\"\n try:\n embeddings=[]\n for agent in self.agents:\n if agent.purpose_embedding is None:\n agent.purpose_embedding = self.get_embedding(agent.purpose)\n\n embeddings.append(agent.purpose_embedding)\n\n if len(embeddings) < 250:\n return 0.999\n\n similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]\n return np.percentile(similarities, 98) if similarities else 0.999\n except Exception as e:\n logger.exception(f\"Error calculating similarity threshold: {e}\")\n raise ValueError(f\"Error calculating similarity threshold: {e}\")\n\n\n def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:\n \"\"\"\n Finds the closest agent based on the given purpose embedding.\n\n :param purpose_embedding: The embedding of the purpose to find the closest agent for.\n :return: Tuple of the closest agent and the highest similarity score.\n \"\"\"\n closest_agent: Optional[Agent] = None\n highest_similarity: float = -np.inf\n\n try:\n for agent in self.agents:\n if agent.purpose_embedding is None:\n agent.purpose_embedding = self.get_embedding(agent.purpose)\n\n similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]\n\n if similarity > highest_similarity:\n highest_similarity = similarity\n closest_agent = agent\n\n return closest_agent, highest_similarity\n except Exception as e:\n logger.exception(f\"Error finding closest agent: {e}\")\n raise ValueError(f\"Error finding closest agent: {e}\")"
},
{
"identifier": "AgentPersistenceManager",
"path": "agents/agent_persistence_manager.py",
"snippet": "class AgentPersistenceManager:\n def __init__(self, db_filename=\"agents.db\"):\n self.persistence = SQLiteAgentPersistence(db_filename)\n\n def save_agent(self, agent):\n \"\"\"\n Serialize and save the agent state if it is a working agent and not a prime agent.\n \"\"\"\n if agent.working_agent and not agent.is_prime:\n agent_dict = AgentSerializer.to_dict(agent)\n self.persistence.save_agent(agent_dict)\n\n def load_agent(self, purpose, agent_lifecycle, openai_wrapper):\n \"\"\"\n Load an agent with the given purpose from the database.\n \"\"\"\n serialized_agent = self.persistence.fetch_agent(purpose)\n if serialized_agent:\n return AgentSerializer.from_dict(serialized_agent, agent_lifecycle, openai_wrapper)\n return None\n\n def load_all_agents(self, agent_lifecycle, openai_wrapper):\n \"\"\"\n Load all agents from the database.\n \"\"\"\n purposes = self.persistence.load_all_purposes()\n agents = []\n for purpose in purposes:\n agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)\n if agent:\n agents.append(agent)\n return agents"
},
{
"identifier": "PRIME_PROMPT",
"path": "prompt_management/prompts.py",
"snippet": "PRIME_PROMPT = \"This is the prime agent. You are only allowed to call other agents. Prime Agent's prompt may not be changed\""
},
{
"identifier": "PRIME_NAME",
"path": "prompt_management/prompts.py",
"snippet": "PRIME_NAME = \"Bootstrap Agent\""
},
{
"identifier": "PROMPT_ENGINEERING_SYSTEM_PROMPT",
"path": "prompt_management/prompts.py",
"snippet": "PROMPT_ENGINEERING_SYSTEM_PROMPT = \"You are a helpful assistant knowledgeable in prompt engineering.\""
},
{
"identifier": "PROMPT_ENGINEERING_TEMPLATE",
"path": "prompt_management/prompts.py",
"snippet": "PROMPT_ENGINEERING_TEMPLATE = (\n \"Using best practices in prompt engineering, create a detailed prompt for the goal '{goal}'. \"\n \"This generated prompt will be combined with the following context later (but must be generic and is forbidden to contain any of the following context): '{sample_input}'\\n\"\n \"Examples: {examples}. Aim for maximum 50 words. Important: Any problems must be solved through sample code or learned information provided in the prompt. \"\n \"Any sample code provided must be executable in isolation. Avoid unresolvable placeholders for URLs and API Keys. \"\n \"You can execute simple Python code to retrieve information from the web, use public APIs (No API Key!!), parse HTML code, or use regex instead. \"\n \"If you retrieve information from the web, avoid parsing HTML Code or use regex, just process the text data and print it out (As shown in the examples)!!! \"\n \"As long as the answer is somewhere in the output, and it is below 1k characters, its a perfect solution. Use real existing services and websites. Don't invent services or use example.com.\"\n)"
},
{
"identifier": "EXAMPLES",
"path": "prompt_management/prompts.py",
"snippet": "EXAMPLES = [\n \"Goal: Your purpose is to be able to write blog posts. Generated Prompt: You are an expert writer on the topic of blog posts.\",\n \"Goal: Your purpose is to count the words of the input. Generated Prompt: # You are a useful assistant that is able to count words. You can use the following code during execution to count word frequencies. Here is sample code, adopt as needed:```python\\nfrom collections import Counter\\n\\n\\nwords = text.split()\\nword_counts = Counter(words)\\nprint(word_counts)\\n```.\",\n \"Goal: Your purpose is to solve basic arithmetic problems. Generated Prompt: You are a proficient calculator. Here's a Python function to solve a basic arithmetic problem, here is some sample code, adopt as needed.: ```python\\nprint(eval(problem))\\n\\n# Example problem: What is 15 times 4?\\nprint(eval('15 * 4'))\\n```.\",\n \"Goal: Your purpose is to generate creative writing prompts. Generated Prompt: You are a creative muse who can come up with engaging and unique writing prompts. Provide an intriguing prompt for a science fiction story set in a distant galaxy.\",\n \"Goal: Your purpose is to translate sentences from English to Spanish. Generated Prompt: You are an efficient language translator. Translate the following sentence into Spanish: 'The sun rises early in the morning.'\",\n \"Goal: Your purpose is to query the Wikipedia API for the current president of a specified country and extract the relevant information. Generated Prompt: You are an adept information retriever. Use the code snippet to query the Wikipedia API for the current president of a specified country and extract the relevant information. Ensure the code is specific enough to identify the president's name. ```python\\nimport requests\\n\\ndef get_current_president(country):\\n S = requests.Session()\\n URL = f\\\"https://en.wikipedia.org/w/api.php\\\"\\n PARAMS = {\\n \\\"action\\\": \\\"query\\\",\\n \\\"format\\\": \\\"json\\\",\\n \\\"titles\\\": f\\\"President_of_{country}\\\",\\n \\\"prop\\\": \\\"extracts\\\",\\n \\\"exintro\\\": True,\\n \\\"explaintext\\\": True,\\n }\\n\\n response = S.get(url=URL, params=PARAMS).json()\\n page = next(iter(response[\\\"query\\\"][\\\"pages\\\"].values()))\\n extract = page[\\\"extract\\\"]\\n print(extract)\\n\\n# Example usage: get_current_president(\\\"France\\\")\\n```\"\n]"
}
] | import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
) | 4,106 |
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 20
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
|
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 20
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT}, | {"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)} | 7 | 2023-12-11 08:17:09+00:00 | 8k |
bytedance/ImageDream | extern/ldm_zero123/models/diffusion/ddim.py | [
{
"identifier": "norm_thresholding",
"path": "extern/ldm_zero123/models/diffusion/sampling_util.py",
"snippet": "def norm_thresholding(x0, value):\n s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)\n return x0 * (value / s)"
},
{
"identifier": "renorm_thresholding",
"path": "extern/ldm_zero123/models/diffusion/sampling_util.py",
"snippet": "def renorm_thresholding(x0, value):\n # renorm\n pred_max = x0.max()\n pred_min = x0.min()\n pred_x0 = (x0 - pred_min) / (pred_max - pred_min) # 0 ... 1\n pred_x0 = 2 * pred_x0 - 1.0 # -1 ... 1\n\n s = torch.quantile(rearrange(pred_x0, \"b ... -> b (...)\").abs(), value, dim=-1)\n s.clamp_(min=1.0)\n s = s.view(-1, *((1,) * (pred_x0.ndim - 1)))\n\n # clip by threshold\n # pred_x0 = pred_x0.clamp(-s, s) / s # needs newer pytorch # TODO bring back to pure-gpu with min/max\n\n # temporary hack: numpy on cpu\n pred_x0 = (\n np.clip(pred_x0.cpu().numpy(), -s.cpu().numpy(), s.cpu().numpy())\n / s.cpu().numpy()\n )\n pred_x0 = torch.tensor(pred_x0).to(self.model.device)\n\n # re.renorm\n pred_x0 = (pred_x0 + 1.0) / 2.0 # 0 ... 1\n pred_x0 = (pred_max - pred_min) * pred_x0 + pred_min # orig range\n return pred_x0"
},
{
"identifier": "spatial_norm_thresholding",
"path": "extern/ldm_zero123/models/diffusion/sampling_util.py",
"snippet": "def spatial_norm_thresholding(x0, value):\n # b c h w\n s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)\n return x0 * (value / s)"
},
{
"identifier": "extract_into_tensor",
"path": "extern/ldm_zero123/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "make_ddim_sampling_parameters",
"path": "extern/ldm_zero123/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt(\n (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)\n )\n if verbose:\n print(\n f\"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}\"\n )\n print(\n f\"For the chosen value of eta, which is {eta}, \"\n f\"this results in the following sigma_t schedule for ddim sampler {sigmas}\"\n )\n return sigmas, alphas, alphas_prev"
},
{
"identifier": "make_ddim_timesteps",
"path": "extern/ldm_zero123/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_timesteps(\n ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True\n):\n if ddim_discr_method == \"uniform\":\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == \"quad\":\n ddim_timesteps = (\n (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2\n ).astype(int)\n else:\n raise NotImplementedError(\n f'There is no ddim discretization method called \"{ddim_discr_method}\"'\n )\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f\"Selected timesteps for ddim sampler: {steps_out}\")\n return steps_out"
},
{
"identifier": "noise_like",
"path": "extern/ldm_zero123/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
}
] | from functools import partial
from einops import rearrange
from tqdm import tqdm
from extern.ldm_zero123.models.diffusion.sampling_util import (
norm_thresholding,
renorm_thresholding,
spatial_norm_thresholding,
)
from extern.ldm_zero123.modules.diffusionmodules.util import (
extract_into_tensor,
make_ddim_sampling_parameters,
make_ddim_timesteps,
noise_like,
)
import numpy as np
import torch | 3,727 | iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(
x0, ts
) # TODO: deterministic forward pass?
img = img_orig * mask + (1.0 - mask) * img
outs = self.p_sample_ddim(
img,
cond,
ts,
index=index,
use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised,
temperature=temperature,
noise_dropout=noise_dropout,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
img, pred_x0 = outs
if callback:
img = callback(i, img, pred_x0)
if img_callback:
img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates["x_inter"].append(img)
intermediates["pred_x0"].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(
self,
x,
c,
t,
index,
repeat_noise=False,
use_original_steps=False,
quantize_denoised=False,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
dynamic_threshold=None,
):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [
torch.cat([unconditional_conditioning[k][i], c[k][i]])
for i in range(len(c[k]))
]
else:
c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
else:
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(
self.model, e_t, x, t, c, **corrector_kwargs
)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = (
self.model.alphas_cumprod_prev
if use_original_steps
else self.ddim_alphas_prev
)
sqrt_one_minus_alphas = (
self.model.sqrt_one_minus_alphas_cumprod
if use_original_steps
else self.ddim_sqrt_one_minus_alphas
)
sigmas = (
self.model.ddim_sigmas_for_original_num_steps
if use_original_steps
else self.ddim_sigmas
)
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full(
(b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
print(t, sqrt_one_minus_at, a_t)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
| """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def to(self, device):
"""Same as to in torch module
Don't really underestand why this isn't a module in the first place"""
for k, v in self.__dict__.items():
if isinstance(v, torch.Tensor):
new_v = getattr(self, k).to(device)
setattr(self, k, new_v)
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(
self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
):
self.ddim_timesteps = make_ddim_timesteps(
ddim_discr_method=ddim_discretize,
num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,
verbose=verbose,
)
alphas_cumprod = self.model.alphas_cumprod
assert (
alphas_cumprod.shape[0] == self.ddpm_num_timesteps
), "alphas have to be defined for each timestep"
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer("betas", to_torch(self.model.betas))
self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
self.register_buffer(
"alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev)
)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer(
"sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_one_minus_alphas_cumprod",
to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
)
self.register_buffer(
"log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recipm1_alphas_cumprod",
to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
)
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(
alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,
verbose=verbose,
)
self.register_buffer("ddim_sigmas", ddim_sigmas)
self.register_buffer("ddim_alphas", ddim_alphas)
self.register_buffer("ddim_alphas_prev", ddim_alphas_prev)
self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev)
/ (1 - self.alphas_cumprod)
* (1 - self.alphas_cumprod / self.alphas_cumprod_prev)
)
self.register_buffer(
"ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps
)
@torch.no_grad()
def sample(
self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.0,
mask=None,
x0=None,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
dynamic_threshold=None,
**kwargs,
):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
while isinstance(ctmp, list):
ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(
f"Warning: Got {cbs} conditionings but batch-size is {batch_size}"
)
else:
if conditioning.shape[0] != batch_size:
print(
f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}"
)
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
# print(f'Data shape for DDIM sampling is {size}, eta {eta}')
samples, intermediates = self.ddim_sampling(
conditioning,
size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask,
x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
return samples, intermediates
@torch.no_grad()
def ddim_sampling(
self,
cond,
shape,
x_T=None,
ddim_use_original_steps=False,
callback=None,
timesteps=None,
quantize_denoised=False,
mask=None,
x0=None,
img_callback=None,
log_every_t=100,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
dynamic_threshold=None,
t_start=-1,
):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = (
self.ddpm_num_timesteps
if ddim_use_original_steps
else self.ddim_timesteps
)
elif timesteps is not None and not ddim_use_original_steps:
subset_end = (
int(
min(timesteps / self.ddim_timesteps.shape[0], 1)
* self.ddim_timesteps.shape[0]
)
- 1
)
timesteps = self.ddim_timesteps[:subset_end]
timesteps = timesteps[:t_start]
intermediates = {"x_inter": [img], "pred_x0": [img]}
time_range = (
reversed(range(0, timesteps))
if ddim_use_original_steps
else np.flip(timesteps)
)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
# print(f"Running DDIM Sampling with {total_steps} timesteps")
iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
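                # Blend for inpainting-style sampling: where mask==1 keep the re-noised original x0, where mask==0 keep the generated latent.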
img_orig = self.model.q_sample(
x0, ts
) # TODO: deterministic forward pass?
img = img_orig * mask + (1.0 - mask) * img
outs = self.p_sample_ddim(
img,
cond,
ts,
index=index,
use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised,
temperature=temperature,
noise_dropout=noise_dropout,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
img, pred_x0 = outs
if callback:
img = callback(i, img, pred_x0)
if img_callback:
img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates["x_inter"].append(img)
intermediates["pred_x0"].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(
self,
x,
c,
t,
index,
repeat_noise=False,
use_original_steps=False,
quantize_denoised=False,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
dynamic_threshold=None,
):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:
e_t = self.model.apply_model(x, t, c)
else:
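            # Classifier-free guidance: evaluate the unconditional and conditional noise predictions in one batched forward pass, then combine them below.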
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [
torch.cat([unconditional_conditioning[k][i], c[k][i]])
for i in range(len(c[k]))
]
else:
c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
else:
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(
self.model, e_t, x, t, c, **corrector_kwargs
)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = (
self.model.alphas_cumprod_prev
if use_original_steps
else self.ddim_alphas_prev
)
sqrt_one_minus_alphas = (
self.model.sqrt_one_minus_alphas_cumprod
if use_original_steps
else self.ddim_sqrt_one_minus_alphas
)
sigmas = (
self.model.ddim_sigmas_for_original_num_steps
if use_original_steps
else self.ddim_sigmas
)
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full(
(b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
print(t, sqrt_one_minus_at, a_t)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None: | pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) | 0 | 2023-12-13 21:09:37+00:00 | 8k |
TencentARC/MotionCtrl | lvdm/modules/attention_temporal.py | [
{
"identifier": "checkpoint",
"path": "lvdm/common.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n try:\n return ckpt(func, *inputs)\n except:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "exists",
"path": "lvdm/common.py",
"snippet": "def exists(val):\n return val is not None"
},
{
"identifier": "uniq",
"path": "lvdm/common.py",
"snippet": "def uniq(arr):\n return{el: True for el in arr}.keys()"
},
{
"identifier": "default",
"path": "lvdm/common.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "max_neg_value",
"path": "lvdm/common.py",
"snippet": "def max_neg_value(t):\n return -torch.finfo(t.dtype).max"
},
{
"identifier": "init_",
"path": "lvdm/common.py",
"snippet": "def init_(tensor):\n dim = tensor.shape[-1]\n std = 1 / math.sqrt(dim)\n tensor.uniform_(-std, std)\n return tensor"
},
{
"identifier": "conv_nd",
"path": "lvdm/basics.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "lvdm/basics.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "lvdm/basics.py",
"snippet": "def normalization(channels, num_groups=32):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNormSpecific(num_groups, channels)"
}
] | import math
import torch
import torch as th
import torch.nn.functional as F
import xformers
import xformers.ops
from inspect import isfunction
from torch import nn, einsum
from einops import rearrange, repeat
from lvdm.common import (
checkpoint,
exists,
uniq,
default,
max_neg_value,
init_
)
from lvdm.basics import (
conv_nd,
zero_module,
normalization
) | 4,083 |
if self.bidirectional_causal_attn:
mask_reverse = torch.triu(torch.ones([1, self.temporal_length, self.temporal_length], device=sim.device))
sim_reverse = sim.float().masked_fill(mask_reverse == 0, max_neg_value)
attn_reverse = sim_reverse.softmax(dim=-1)
out_reverse = einsum('b i j, b j d -> b i d', attn_reverse, v)
out += out_reverse
if self.use_relative_position:
v2 = self.relative_position_v(len_q, len_v)
out2 = einsum('b t s, t s d -> b t d', attn, v2) # TODO check
            out += out2  # TODO check: add before or after merging heads? Here the relative-position values are applied to the split-head tensors, then the heads are merged.
out = rearrange(out, '(b h) n d -> b n (h d)', h=nh) # merge head
return self.to_out(out)
class CrossAttention(nn.Module):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.,
sa_shared_kv=False, shared_type='only_first', **kwargs,):
super().__init__()
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
self.sa_shared_kv = sa_shared_kv
assert(shared_type in ['only_first', 'all_frames', 'first_and_prev', 'only_prev', 'full', 'causal', 'full_qkv'])
self.shared_type = shared_type
self.dim_head = dim_head
self.scale = dim_head ** -0.5
self.heads = heads
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, query_dim),
nn.Dropout(dropout)
)
if XFORMERS_IS_AVAILBLE:
self.forward = self.efficient_forward
def forward(self, x, context=None, mask=None):
h = self.heads
b = x.shape[0]
q = self.to_q(x)
context = default(context, x)
k = self.to_k(context)
v = self.to_v(context)
if self.sa_shared_kv:
if self.shared_type == 'only_first':
k,v = map(lambda xx: rearrange(xx[0].unsqueeze(0), 'b n c -> (b n) c').unsqueeze(0).repeat(b,1,1),
(k,v))
else:
raise NotImplementedError
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if exists(mask):
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def efficient_forward(self, x, context=None, mask=None):
q = self.to_q(x)
context = default(context, x)
k = self.to_k(context)
v = self.to_v(context)
b, _, _ = q.shape
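        # Split heads: reshape (B, N, H*D) -> (B*H, N, D), the layout expected by xformers' memory-efficient attention.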
q, k, v = map(
lambda t: t.unsqueeze(3)
.reshape(b, t.shape[1], self.heads, self.dim_head)
.permute(0, 2, 1, 3)
.reshape(b * self.heads, t.shape[1], self.dim_head)
.contiguous(),
(q, k, v),
)
# actually compute the attention, what we cannot get enough of
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=None)
if exists(mask):
raise NotImplementedError
out = (
out.unsqueeze(0)
.reshape(b, self.heads, out.shape[1], self.dim_head)
.permute(0, 2, 1, 3)
.reshape(b, out.shape[1], self.heads * self.dim_head)
)
return self.to_out(out)
class VideoSpatialCrossAttention(CrossAttention):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0):
super().__init__(query_dim, context_dim, heads, dim_head, dropout)
def forward(self, x, context=None, mask=None):
b, c, t, h, w = x.shape
if context is not None:
context = context.repeat(t, 1, 1)
        x = super().forward(spatial_attn_reshape(x), context=context) + x
return spatial_attn_reshape_back(x,b,h)
class BasicTransformerBlockST(nn.Module):
def __init__(self,
# Spatial Stuff
dim,
n_heads,
d_head,
dropout=0.,
context_dim=None,
gated_ff=True,
|
try:
XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
project_in = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU()
) if not glu else GEGLU(dim, inner_dim)
self.net = nn.Sequential(
project_in,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out)
)
def forward(self, x):
return self.net(x)
def Normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
# ---------------------------------------------------------------------------------------------------
class RelativePosition(nn.Module):
""" https://github.com/evelinehong/Transformer_Relative_Position_PyTorch/blob/master/relative_position.py """
def __init__(self, num_units, max_relative_position):
super().__init__()
self.num_units = num_units
self.max_relative_position = max_relative_position
self.embeddings_table = nn.Parameter(th.Tensor(max_relative_position * 2 + 1, num_units))
nn.init.xavier_uniform_(self.embeddings_table)
def forward(self, length_q, length_k):
device = self.embeddings_table.device
range_vec_q = th.arange(length_q, device=device)
range_vec_k = th.arange(length_k, device=device)
distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
distance_mat_clipped = th.clamp(distance_mat, -self.max_relative_position, self.max_relative_position)
final_mat = distance_mat_clipped + self.max_relative_position
# final_mat = th.LongTensor(final_mat).to(self.embeddings_table.device)
# final_mat = th.tensor(final_mat, device=self.embeddings_table.device, dtype=torch.long)
final_mat = final_mat.long()
embeddings = self.embeddings_table[final_mat]
return embeddings
class TemporalCrossAttention(nn.Module):
def __init__(self,
query_dim,
context_dim=None,
heads=8,
dim_head=64,
dropout=0.,
temporal_length=None, # For relative positional representation and image-video joint training.
image_length=None, # For image-video joint training.
                 use_relative_position=False, # whether to use relative positional representation in temporal attention.
img_video_joint_train=False, # For image-video joint training.
use_tempoal_causal_attn=False,
bidirectional_causal_attn=False,
tempoal_attn_type=None,
joint_train_mode="same_batch",
**kwargs,
):
super().__init__()
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
self.context_dim = context_dim
self.scale = dim_head ** -0.5
self.heads = heads
self.temporal_length = temporal_length
self.use_relative_position = use_relative_position
self.img_video_joint_train = img_video_joint_train
self.bidirectional_causal_attn = bidirectional_causal_attn
self.joint_train_mode = joint_train_mode
assert(joint_train_mode in ["same_batch", "diff_batch"])
self.tempoal_attn_type = tempoal_attn_type
if bidirectional_causal_attn:
assert use_tempoal_causal_attn
if tempoal_attn_type:
assert(tempoal_attn_type in ['sparse_causal', 'sparse_causal_first'])
assert(not use_tempoal_causal_attn)
assert(not (img_video_joint_train and (self.joint_train_mode == "same_batch")))
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
assert(not (img_video_joint_train and (self.joint_train_mode == "same_batch") and use_tempoal_causal_attn))
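        # Pre-build a static temporal attention mask (1 = attend, 0 = block) for the selected attention variant; it is combined with any runtime mask in forward().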
if img_video_joint_train:
if self.joint_train_mode == "same_batch":
mask = torch.ones([1, temporal_length+image_length, temporal_length+image_length])
# mask[:, image_length:, :] = 0
# mask[:, :, image_length:] = 0
mask[:, temporal_length:, :] = 0
mask[:, :, temporal_length:] = 0
self.mask = mask
else:
self.mask = None
elif use_tempoal_causal_attn:
# normal causal attn
self.mask = torch.tril(torch.ones([1, temporal_length, temporal_length]))
elif tempoal_attn_type == 'sparse_causal':
# all frames interact with only the `prev` & self frame
mask1 = torch.tril(torch.ones([1, temporal_length, temporal_length])).bool() # true indicates keeping
mask2 = torch.zeros([1, temporal_length, temporal_length]) # initialize to same shape with mask1
mask2[:,2:temporal_length, :temporal_length-2] = torch.tril(torch.ones([1,temporal_length-2, temporal_length-2]))
mask2=(1-mask2).bool() # false indicates masking
self.mask = mask1 & mask2
elif tempoal_attn_type == 'sparse_causal_first':
# all frames interact with only the `first` & self frame
mask1 = torch.tril(torch.ones([1, temporal_length, temporal_length])).bool() # true indicates keeping
mask2 = torch.zeros([1, temporal_length, temporal_length])
mask2[:,2:temporal_length, 1:temporal_length-1] = torch.tril(torch.ones([1,temporal_length-2, temporal_length-2]))
mask2=(1-mask2).bool() # false indicates masking
self.mask = mask1 & mask2
else:
self.mask = None
if use_relative_position:
assert(temporal_length is not None)
self.relative_position_k = RelativePosition(num_units=dim_head, max_relative_position=temporal_length)
self.relative_position_v = RelativePosition(num_units=dim_head, max_relative_position=temporal_length)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, query_dim),
nn.Dropout(dropout)
)
nn.init.constant_(self.to_q.weight, 0)
nn.init.constant_(self.to_k.weight, 0)
nn.init.constant_(self.to_v.weight, 0)
nn.init.constant_(self.to_out[0].weight, 0)
nn.init.constant_(self.to_out[0].bias, 0)
def forward(self, x, context=None, mask=None):
# if context is None:
# print(f'[Temp Attn] x={x.shape},context=None')
# else:
# print(f'[Temp Attn] x={x.shape},context={context.shape}')
nh = self.heads
out = x
q = self.to_q(out)
# if context is not None:
# print(f'temporal context 1 ={context.shape}')
# print(f'x={x.shape}')
context = default(context, x)
# print(f'temporal context 2 ={context.shape}')
k = self.to_k(context)
v = self.to_v(context)
# print(f'q ={q.shape},k={k.shape}')
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=nh), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
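        # Optionally add a learned relative-position bias to the attention logits before masking and softmax.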
if self.use_relative_position:
len_q, len_k, len_v = q.shape[1], k.shape[1], v.shape[1]
k2 = self.relative_position_k(len_q, len_k)
sim2 = einsum('b t d, t s d -> b t s', q, k2) * self.scale # TODO check
sim += sim2
# print('mask',mask)
if exists(self.mask):
if mask is None:
mask = self.mask.to(sim.device)
else:
mask = self.mask.to(sim.device).bool() & mask #.to(sim.device)
else:
mask = mask
# if self.img_video_joint_train:
# # process mask (make mask same shape with sim)
# c, h, w = mask.shape
# c, t, s = sim.shape
# # assert(h == w and t == s),f"mask={mask.shape}, sim={sim.shape}, h={h}, w={w}, t={t}, s={s}"
# if h > t:
# mask = mask[:, :t, :]
# elif h < t: # pad zeros to mask (no attention) only initial mask =1 area compute weights
# mask_ = torch.zeros([c,t,w]).to(mask.device)
# mask_[:, :h, :] = mask
# mask = mask_
# c, h, w = mask.shape
# if w > s:
# mask = mask[:, :, :s]
# elif w < s: # pad zeros to mask
# mask_ = torch.zeros([c,h,s]).to(mask.device)
# mask_[:, :, :w] = mask
# mask = mask_
# max_neg_value = -torch.finfo(sim.dtype).max
# sim = sim.float().masked_fill(mask == 0, max_neg_value)
if mask is not None:
max_neg_value = -1e9
            sim = sim + (1 - mask.float()) * max_neg_value  # mask==1 keeps the position, mask==0 masks it out
# print('sim after masking: ', sim)
# if torch.isnan(sim).any() or torch.isinf(sim).any() or (not sim.any()):
# print(f'sim [after masking], isnan={torch.isnan(sim).any()}, isinf={torch.isinf(sim).any()}, allzero={not sim.any()}')
attn = sim.softmax(dim=-1)
# print('attn after softmax: ', attn)
# if torch.isnan(attn).any() or torch.isinf(attn).any() or (not attn.any()):
# print(f'attn [after softmax], isnan={torch.isnan(attn).any()}, isinf={torch.isinf(attn).any()}, allzero={not attn.any()}')
# attn = torch.where(torch.isnan(attn), torch.full_like(attn,0), attn)
# if torch.isinf(attn.detach()).any():
# import pdb;pdb.set_trace()
# if torch.isnan(attn.detach()).any():
# import pdb;pdb.set_trace()
out = einsum('b i j, b j d -> b i d', attn, v)
if self.bidirectional_causal_attn:
mask_reverse = torch.triu(torch.ones([1, self.temporal_length, self.temporal_length], device=sim.device))
sim_reverse = sim.float().masked_fill(mask_reverse == 0, max_neg_value)
attn_reverse = sim_reverse.softmax(dim=-1)
out_reverse = einsum('b i j, b j d -> b i d', attn_reverse, v)
out += out_reverse
if self.use_relative_position:
v2 = self.relative_position_v(len_q, len_v)
out2 = einsum('b t s, t s d -> b t d', attn, v2) # TODO check
            out += out2  # TODO check: add before or after merging heads? Here the relative-position values are applied to the split-head tensors, then the heads are merged.
out = rearrange(out, '(b h) n d -> b n (h d)', h=nh) # merge head
return self.to_out(out)
class CrossAttention(nn.Module):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.,
sa_shared_kv=False, shared_type='only_first', **kwargs,):
super().__init__()
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
self.sa_shared_kv = sa_shared_kv
assert(shared_type in ['only_first', 'all_frames', 'first_and_prev', 'only_prev', 'full', 'causal', 'full_qkv'])
self.shared_type = shared_type
self.dim_head = dim_head
self.scale = dim_head ** -0.5
self.heads = heads
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, query_dim),
nn.Dropout(dropout)
)
if XFORMERS_IS_AVAILBLE:
self.forward = self.efficient_forward
def forward(self, x, context=None, mask=None):
h = self.heads
b = x.shape[0]
q = self.to_q(x)
context = default(context, x)
k = self.to_k(context)
v = self.to_v(context)
if self.sa_shared_kv:
if self.shared_type == 'only_first':
k,v = map(lambda xx: rearrange(xx[0].unsqueeze(0), 'b n c -> (b n) c').unsqueeze(0).repeat(b,1,1),
(k,v))
else:
raise NotImplementedError
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if exists(mask):
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def efficient_forward(self, x, context=None, mask=None):
q = self.to_q(x)
context = default(context, x)
k = self.to_k(context)
v = self.to_v(context)
b, _, _ = q.shape
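        # Split heads: reshape (B, N, H*D) -> (B*H, N, D), the layout expected by xformers' memory-efficient attention.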
q, k, v = map(
lambda t: t.unsqueeze(3)
.reshape(b, t.shape[1], self.heads, self.dim_head)
.permute(0, 2, 1, 3)
.reshape(b * self.heads, t.shape[1], self.dim_head)
.contiguous(),
(q, k, v),
)
# actually compute the attention, what we cannot get enough of
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=None)
if exists(mask):
raise NotImplementedError
out = (
out.unsqueeze(0)
.reshape(b, self.heads, out.shape[1], self.dim_head)
.permute(0, 2, 1, 3)
.reshape(b, out.shape[1], self.heads * self.dim_head)
)
return self.to_out(out)
class VideoSpatialCrossAttention(CrossAttention):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0):
super().__init__(query_dim, context_dim, heads, dim_head, dropout)
def forward(self, x, context=None, mask=None):
b, c, t, h, w = x.shape
if context is not None:
context = context.repeat(t, 1, 1)
x = super.forward(spatial_attn_reshape(x), context=context) + x
return spatial_attn_reshape_back(x,b,h)
class BasicTransformerBlockST(nn.Module):
def __init__(self,
# Spatial Stuff
dim,
n_heads,
d_head,
dropout=0.,
context_dim=None,
gated_ff=True, | checkpoint=True, | 0 | 2023-12-06 07:27:45+00:00 | 8k |
TianxingWu/FreeInit | examples/AnimateDiff/animatediff/models/unet_blocks.py | [
{
"identifier": "Transformer3DModel",
"path": "examples/AnimateDiff/animatediff/models/attention.py",
"snippet": "class Transformer3DModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n use_linear_projection: bool = False,\n only_cross_attention: bool = False,\n upcast_attention: bool = False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n ):\n super().__init__()\n self.use_linear_projection = use_linear_projection\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n # Define input layers\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n if use_linear_projection:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n else:\n self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n\n # Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n for d in range(num_layers)\n ]\n )\n\n # 4. 
Define output layers\n if use_linear_projection:\n self.proj_out = nn.Linear(in_channels, inner_dim)\n else:\n self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):\n # Input\n assert hidden_states.dim() == 5, f\"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}.\"\n video_length = hidden_states.shape[2]\n hidden_states = rearrange(hidden_states, \"b c f h w -> (b f) c h w\")\n encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)\n\n batch, channel, height, weight = hidden_states.shape\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n if not self.use_linear_projection:\n hidden_states = self.proj_in(hidden_states)\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)\n else:\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)\n hidden_states = self.proj_in(hidden_states)\n\n # Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n video_length=video_length\n )\n\n # Output\n if not self.use_linear_projection:\n hidden_states = (\n hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()\n )\n hidden_states = self.proj_out(hidden_states)\n else:\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()\n )\n\n output = hidden_states + residual\n\n output = rearrange(output, \"(b f) c h w -> b c f h w\", f=video_length)\n if not return_dict:\n return (output,)\n\n return Transformer3DModelOutput(sample=output)"
},
{
"identifier": "Downsample3D",
"path": "examples/AnimateDiff/animatediff/models/resnet.py",
"snippet": "class Downsample3D(nn.Module):\n def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name=\"conv\"):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.padding = padding\n stride = 2\n self.name = name\n\n if use_conv:\n self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)\n else:\n raise NotImplementedError\n\n def forward(self, hidden_states):\n assert hidden_states.shape[1] == self.channels\n if self.use_conv and self.padding == 0:\n raise NotImplementedError\n\n assert hidden_states.shape[1] == self.channels\n hidden_states = self.conv(hidden_states)\n\n return hidden_states"
},
{
"identifier": "ResnetBlock3D",
"path": "examples/AnimateDiff/animatediff/models/resnet.py",
"snippet": "class ResnetBlock3D(nn.Module):\n def __init__(\n self,\n *,\n in_channels,\n out_channels=None,\n conv_shortcut=False,\n dropout=0.0,\n temb_channels=512,\n groups=32,\n groups_out=None,\n pre_norm=True,\n eps=1e-6,\n non_linearity=\"swish\",\n time_embedding_norm=\"default\",\n output_scale_factor=1.0,\n use_in_shortcut=None,\n use_inflated_groupnorm=None,\n ):\n super().__init__()\n self.pre_norm = pre_norm\n self.pre_norm = True\n self.in_channels = in_channels\n out_channels = in_channels if out_channels is None else out_channels\n self.out_channels = out_channels\n self.use_conv_shortcut = conv_shortcut\n self.time_embedding_norm = time_embedding_norm\n self.output_scale_factor = output_scale_factor\n\n if groups_out is None:\n groups_out = groups\n\n assert use_inflated_groupnorm != None\n if use_inflated_groupnorm:\n self.norm1 = InflatedGroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n else:\n self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n\n self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if temb_channels is not None:\n if self.time_embedding_norm == \"default\":\n time_emb_proj_out_channels = out_channels\n elif self.time_embedding_norm == \"scale_shift\":\n time_emb_proj_out_channels = out_channels * 2\n else:\n raise ValueError(f\"unknown time_embedding_norm : {self.time_embedding_norm} \")\n\n self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)\n else:\n self.time_emb_proj = None\n\n if use_inflated_groupnorm:\n self.norm2 = InflatedGroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)\n else:\n self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if non_linearity == \"swish\":\n self.nonlinearity = lambda x: F.silu(x)\n elif non_linearity == \"mish\":\n self.nonlinearity = Mish()\n elif non_linearity == \"silu\":\n self.nonlinearity = nn.SiLU()\n\n self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut\n\n self.conv_shortcut = None\n if self.use_in_shortcut:\n self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, input_tensor, temb):\n hidden_states = input_tensor\n\n hidden_states = self.norm1(hidden_states)\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.conv1(hidden_states)\n\n if temb is not None:\n temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]\n\n if temb is not None and self.time_embedding_norm == \"default\":\n hidden_states = hidden_states + temb\n\n hidden_states = self.norm2(hidden_states)\n\n if temb is not None and self.time_embedding_norm == \"scale_shift\":\n scale, shift = torch.chunk(temb, 2, dim=1)\n hidden_states = hidden_states * (1 + scale) + shift\n\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.conv2(hidden_states)\n\n if self.conv_shortcut is not None:\n input_tensor = self.conv_shortcut(input_tensor)\n\n output_tensor = (input_tensor + hidden_states) / self.output_scale_factor\n\n return output_tensor"
},
{
"identifier": "Upsample3D",
"path": "examples/AnimateDiff/animatediff/models/resnet.py",
"snippet": "class Upsample3D(nn.Module):\n def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name=\"conv\"):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_conv_transpose = use_conv_transpose\n self.name = name\n\n conv = None\n if use_conv_transpose:\n raise NotImplementedError\n elif use_conv:\n self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)\n\n def forward(self, hidden_states, output_size=None):\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv_transpose:\n raise NotImplementedError\n\n # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16\n dtype = hidden_states.dtype\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(torch.float32)\n\n # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984\n if hidden_states.shape[0] >= 64:\n hidden_states = hidden_states.contiguous()\n\n # if `output_size` is passed we force the interpolation output\n # size and do not make use of `scale_factor=2`\n if output_size is None:\n hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode=\"nearest\")\n else:\n hidden_states = F.interpolate(hidden_states, size=output_size, mode=\"nearest\")\n\n # If the input is bfloat16, we cast back to bfloat16\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(dtype)\n\n # if self.use_conv:\n # if self.name == \"conv\":\n # hidden_states = self.conv(hidden_states)\n # else:\n # hidden_states = self.Conv2d_0(hidden_states)\n hidden_states = self.conv(hidden_states)\n\n return hidden_states"
},
{
"identifier": "get_motion_module",
"path": "examples/AnimateDiff/animatediff/models/motion_module.py",
"snippet": "def get_motion_module(\n in_channels,\n motion_module_type: str, \n motion_module_kwargs: dict\n):\n if motion_module_type == \"Vanilla\":\n return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,) \n else:\n raise ValueError"
}
] | import torch
import pdb
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module | 5,160 |
for resnet, motion_module in zip(self.resnets, self.motion_modules):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class CrossAttnUpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
add_upsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_upsample:
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
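    # Strip the optional "UNetRes" prefix so block names from diffusers-style configs map onto the 3D block classes defined below.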
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{up_block_type} does not exist.")
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
in_channels // attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=in_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
hidden_states = self.resnets[0](hidden_states, temb)
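        # Alternate cross-attention, the optional temporal motion module, and a ResNet block for each layer of the mid block.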
for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
hidden_states = resnet(hidden_states, temb)
return hidden_states
class CrossAttnDownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample3D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
output_states = ()
for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):
if self.training and self.gradient_checkpointing:
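                # Gradient checkpointing: recompute these activations during the backward pass to trade compute for memory.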
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn, return_dict=False),
hidden_states,
encoder_hidden_states,
)[0]
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class DownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
motion_modules = []
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample3D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None, encoder_hidden_states=None):
output_states = ()
for resnet, motion_module in zip(self.resnets, self.motion_modules):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class CrossAttnUpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
add_upsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_upsample: | self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)]) | 3 | 2023-12-12 13:11:24+00:00 | 8k |
allenai/unified-io-2 | t5x/examples/unified_io/data/nlp_instruction_following.py | [
{
"identifier": "MULTITASK_TFDS_DATA_DIR",
"path": "t5x/examples/unified_io/config.py",
"snippet": "MULTITASK_TFDS_DATA_DIR = None"
},
{
"identifier": "get_default_vocabulary",
"path": "t5x/examples/unified_io/data/data_utils.py",
"snippet": "def get_default_vocabulary():\n if config.TOKENIZER == \"t5x-v1\":\n return seqio.SentencePieceVocabulary(\n \"gs://t5-data/vocabs/cc_all.32000/sentencepiece.model\", 1000)\n elif config.TOKENIZER == \"llama\":\n if not config.LLAMA_TOKENIZER_PATH:\n raise ValueError(\"`config.LLAMA_TOKENIZER_PATH` should point to the LLAMA tokenizer`\")\n return uio_vocab.SentencePieceVocabulary(\n config.LLAMA_TOKENIZER_PATH,\n extra_ids=DEFAULT_EXTRA_IDS,\n reverse_extra_ids=True,\n modality_extra_id_n_frames=MODALITY_EXTRA_ID_N_FRAMES,\n hack_to_t5_start_tokens=True,\n prefix_as_special_token=True,\n )\n else:\n raise ValueError(config.TOKENIZER)"
},
{
"identifier": "apply_keyword_prompt",
"path": "t5x/examples/unified_io/data/data_utils.py",
"snippet": "def apply_keyword_prompt(prompt, allow_missing=False, **kwargs):\n \"\"\"Fills in the brackted keywords in `prompt` with the keywords in `kwargs`\"\"\"\n return valid_regex_replace(prompt, {\"{\"+k+\"}\": v for k, v in kwargs.items()}, allow_missing)"
},
{
"identifier": "random_element",
"path": "t5x/examples/unified_io/data/data_utils.py",
"snippet": "def random_element(vec, seed=None):\n if isinstance(vec, list):\n if len(vec) == 1:\n return vec[0]\n assert len(vec) > 0\n vec = tf.constant(vec)\n if seed is not None:\n ix = tf.random.stateless_uniform((), seed, 0, tf.shape(vec)[0], tf.int32)\n else:\n ix = tf.random.uniform((), 0, tf.shape(vec)[0], tf.int32)\n return vec[ix]"
},
{
"identifier": "Prompt",
"path": "t5x/examples/unified_io/data/prompt_definition.py",
"snippet": "class Prompt:\n \"\"\"Configurable interface for getting prompts\"\"\"\n\n def __init__(self, original_flag=True, revised_original_flag=False, manual_flag=True,\n gpt3_flag=True, single_prompt=False, dbg=None):\n self.prompt_list = []\n self.original_flag = original_flag\n self.revised_original_flag = revised_original_flag\n self.manual_flag = manual_flag\n self.gpt3_flag = gpt3_flag\n self.single_prompt = single_prompt\n self.dbg = dbg\n\n def get_prompt_list(self, task_name, dataset_name):\n if self.dbg:\n logging.info(f\"Using dbg prmopt {self.dbg}\")\n return [self.dbg]\n prompt_list = []\n if self.original_flag:\n if self.revised_original_flag and 'revised_original' in PROMPT_DICT[task_name]:\n prompt_list += PROMPT_DICT[task_name]['revised_original']\n else:\n prompt_list += PROMPT_DICT[task_name]['original']\n if self.revised_original_flag and 'revised_original' in PROMPT_DICT[dataset_name]:\n prompt_list += PROMPT_DICT[dataset_name]['revised_original']\n else:\n prompt_list += PROMPT_DICT[dataset_name]['original']\n if self.manual_flag:\n if 'manual' in PROMPT_DICT[task_name]:\n prompt_list += PROMPT_DICT[task_name]['manual']\n if 'manual' in PROMPT_DICT[dataset_name]:\n prompt_list += PROMPT_DICT[dataset_name]['manual']\n if self.gpt3_flag:\n if 'gpt3' in PROMPT_DICT[task_name]:\n prompt_list += PROMPT_DICT[task_name]['gpt3']\n \n if 'gpt3' in PROMPT_DICT[dataset_name]: \n prompt_list += PROMPT_DICT[dataset_name]['gpt3']\n if not prompt_list:\n raise ValueError(f\"No prompts for {task_name}/{dataset_name}\")\n if self.single_prompt:\n logging.info(f\"Using prompt \\\"{prompt_list[0]}\\\" for {task_name} {dataset_name}\")\n return prompt_list[:1]\n return prompt_list"
},
{
"identifier": "TRUNCATE",
"path": "t5x/examples/unified_io/data/prompt_dict.py",
"snippet": "TRUNCATE = \"[TR]\""
},
{
"identifier": "unified_io_preprocessor",
"path": "t5x/examples/unified_io/modality_processing.py",
"snippet": "@seqio.map_over_dataset\ndef unified_io_preprocessor(features, output_features, sequence_length):\n \"\"\"General pre-processing function that builds models features from multi-modal inputs.\n\n This function should be used as the last pre-processor for all tasks, it calls the\n modality-specific preprocess modules produced by `get_input_modalities` and\n `get_target_modalities`to apply model-specific preprocess that needs to be done before\n the tasks are combined into a mixture.\n\n Args:\n features: dictionary with a subset of the following fields:\n text_inputs: int32 array of tokenized text inputs (without EOS) or tf.string scalar,\n the prompt/input text to the model\n text_targets: int32 array tokenized of text inputs (without EOS) or\n tf.string scalar, the the output text to generate.\n Can also be a list of string tensors or ragged int32 tensor to represent\n multiple correct answer options\n image_inputs: RGB image size `IMAGE_INPUT_SIZE` in float32 format, the input image\n image_input_masks: image_mask for size `IMAGE_INPUT_SIZE` marking pixels to\n included iff `image_inputs` is included\n audio_inputs: Audio spectrogram [256, 128]\n audio_inputs_masks: Audio spectrogram mask\n video_inputs: RGB by time video in float32 format\n video_inputs_masks: 2D mask of the same height/width of the video\n audio_history_inputs: Audio spectrogram history [N, 256, 128]\n audio_history_input_masks: Masks for audio_history_inputs\n image_targets: (optional) RGB image of `IMAGE_TARGET_SIZE` in float32 format, the target\n image to generate\n image_target_masks: (optional) image_mask for size `IMAGE_TARGET_SIZE` or `IMAGE_INPUT_SIZE`\n included iff `image_targets` is included.\n If of `IMAGE_INPUT_SIZE`, the mask will be applied as if re-scaled to\n `IMAGE_TARGET_SIZE`, but we can avoid some compute/rounding errors by\n avoiding explicitly rescaling it in this case.\n audio_targets: Audio spectrogram target\n audio_targets_masks: Target audio mask\n\n eval: sub-dictionary of features that should be passed to metric functions\n (e.g., ground truth labels)\n choices: List of strings or ragged int32 tensor of text answer options\n \"\"\"\n input_features = {}\n input_modalities = get_input_modalities()\n for k, v in input_modalities.items():\n input_features[k] = v.preprocess_inputs(features, output_features, sequence_length)\n\n target_features = {}\n for k, v in get_target_modalities().items():\n target_features[k] = v.preprocess_inputs(features, output_features, sequence_length)\n\n # Features that might be needed by metric functions or for evaluations\n if \"meta\" in features:\n meta = features[\"meta\"]\n else:\n meta = {}\n for k in features:\n if k.startswith(\"meta/\"):\n meta[k] = features[k]\n\n out = dict(\n inputs=input_features,\n targets=target_features,\n meta=meta\n )\n\n # If there are answer choices, they need to be passed through to the model\n if \"choices\" in features:\n out[\"choices\"] = features[\"choices\"]\n\n out = traverse_util.flatten_dict(out, keep_empty_nodes=False, sep=\"/\")\n return out"
},
{
"identifier": "OUTPUT_FEATURES",
"path": "t5x/examples/unified_io/modality_processing.py",
"snippet": "OUTPUT_FEATURES = {}"
}
] | import functools
import re
import seqio
import tensorflow as tf
from seqio import TaskRegistry
from seqio.preprocessors import rekey
from t5x.examples.unified_io.config import MULTITASK_TFDS_DATA_DIR
from t5x.examples.unified_io.data.data_utils import get_default_vocabulary, apply_keyword_prompt, \
random_element
from t5x.examples.unified_io.data.prompt_definition import Prompt
from t5x.examples.unified_io.data.prompt_dict import TRUNCATE
from t5x.examples.unified_io.modality_processing import unified_io_preprocessor, OUTPUT_FEATURES | 4,964 | 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115,
1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131,
1132, 1133, 1134, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1168, 1169, 1170,
1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1218,
1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234,
1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250,
1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266,
1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282,
1323, 1324, 1329, 1330, 1334, 1335, 1350, 1351, 1352, 1353, 1365, 1367, 1370, 1371, 1373, 1374,
1375, 1376, 1377, 1395, 1396, 1397, 1402, 1414, 1432, 1433, 1435, 1436, 1490, 1491, 1492, 1493,
1494, 1496, 1497, 1514, 1537, 1538, 1539, 1543, 1544, 1545, 1546, 1561, 1569, 1570, 1571, 1574,
1575, 1576, 1577, 1588, 1591, 1610, 1611, 1616, 1617, 1618, 1619, 1620, 1621, 1626, 1627, 1628,
1629, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1654, 1655, 1662, 1663, 1666, 1667, 1676, 1677,
1685, 1686, 1689, 1690, 1691, 1692
]
def filter_by_len(ds, sequence_length):
@seqio.map_over_dataset
def tokenize(ex):
voc = get_default_vocabulary()
ex["text_inputs"] = voc.encode_tf(ex["text_inputs"])
ex["text_targets"] = voc.encode_tf(ex["text_targets"])
return ex
ds = tokenize(ds)
def _filter(ex):
# Leave one space for EOS
return (
(len(ex["text_inputs"]) <= sequence_length["text_inputs"] - 1) and
(len(ex["text_targets"]) <= sequence_length["text_targets"] - 1)
)
return ds.filter(_filter)
@seqio.map_over_dataset
def tokenize_with_truncate(x, sequence_length):
"""Tokenize x but truncate from the special TRUNCATE symbol not the end"""
voc = get_default_vocabulary()
text_inputs = x["text_inputs"]
parts = tf.strings.split(text_inputs, TRUNCATE, maxsplit=2)
if tf.shape(parts)[0] == 1:
x["text_inputs_pretokenized"] = text_inputs
x["text_inputs"] = voc.encode_tf(parts[0])
else:
x["text_inputs_pretokenized"] = tf.strings.join([parts[0], parts[1]], "")
to_truncate = voc.encode_tf(parts[0])
suffix = voc.encode_tf(parts[1])
max_input_len = sequence_length["text_inputs"]
n = max_input_len - tf.shape(suffix)[0] - 1 # -1 for the EOS
x["text_inputs"] = tf.concat([to_truncate[:n], suffix], 0)
return x
def filter_non_english(ds, source):
if source == "NIv2":
def _fn(ex):
return not tf.strings.regex_full_match(ex["task_name"], f"task({'|'.join(str(x) for x in NI_NON_ENGLISH_TASKS)})_.*")
elif source == "Flan2021":
def _fn(ex):
return not tf.strings.regex_full_match(ex["task_name"], "(wmt[0-9]*_.*)|para_crawl_enes")
else:
return ds
return ds.filter(_fn)
@seqio.map_over_dataset
def preprocess_flan(ex, name):
return dict(
text_inputs=tf.strings.join(["[Text] [S] ", ex["inputs"]]),
text_targets=ex["targets"],
example_id=tf.strings.join([name, tf.strings.as_string(ex["example_num"])], "-")
)
def add_flan(name):
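  # registers one FLAN sub-collection as a seqio Task; the first 2000 training examples are held out as a validation split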
full_name = f"flan2_{name.lower()}"
TaskRegistry.add(
full_name,
source=seqio.TfdsDataSource(
tfds_name=f"{full_name}:1.0.0",
tfds_data_dir=MULTITASK_TFDS_DATA_DIR,
splits={
"train": "train[2000:]",
"validation": "train[:2000]"
}
),
preprocessors=[
functools.partial(filter_non_english, source=name),
functools.partial(preprocess_flan, name=full_name),
filter_by_len,
unified_io_preprocessor,
],
output_features=OUTPUT_FEATURES,
)
FLAN_DATASETS = ["Flan2021", "T0", "NIv2", "CoT", "Dialog"]
for dataset in FLAN_DATASETS:
add_flan(dataset)
# Weights from https://github.com/google-research/FLAN/blob/main/flan/v2/run_example.py#L65-L73
# git commit 7b33ac0
seqio.MixtureRegistry.add(
'flan2',
tasks=[
('flan2_flan2021', 0.4), # mixing weight = 40%
('flan2_t0', 0.32), # mixing weight = 32%
('flan2_niv2', 0.2), # mixing weight = 20%
('flan2_cot', 0.05), # mixing weight = 5%
('flan2_dialog', 0.03), # mixing weight = 3%
])
def preprocess_instruction_context(ds, dataset_name):
|
# Extracted from the language labels from the NLI README
NI_NON_ENGLISH_TASKS = [
86, 117, 171, 172, 173, 174, 175, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
262, 263, 264, 265, 266, 271, 272, 273, 312, 313, 314, 315, 334, 336, 338, 394, 395, 396, 406,
407, 408, 409, 410, 411, 412, 414, 415, 416, 417, 424, 425, 426, 427, 432, 433, 434, 435, 436,
437, 438, 439, 440, 441, 446, 447, 448, 449, 450, 451, 452, 463, 464, 465, 466, 467, 468, 473,
474, 479, 480, 481, 482, 483, 484, 485, 486, 487, 524, 525, 526, 527, 528, 529, 530, 531, 532,
533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 548, 549, 551, 552, 553,
554, 555, 556, 557, 558, 559, 561, 562, 601, 604, 612, 634, 635, 643, 644, 650, 651, 652, 653,
654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 680, 762, 763, 764, 765, 771, 772, 773, 774,
775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793,
794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812,
813, 814, 815, 816, 817, 818, 829, 830, 831, 832, 836, 837, 838, 839, 840, 841, 842, 872, 873,
877, 878, 896, 910, 911, 912, 913, 914, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948,
949, 950, 951, 952, 953, 954, 960, 961, 962, 968, 969, 974, 975, 976, 977, 978, 979, 980, 981,
982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000,
1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016,
1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032,
1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048,
1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064,
1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080,
1081, 1082, 1083, 1084, 1085, 1086, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099,
1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115,
1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131,
1132, 1133, 1134, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1168, 1169, 1170,
1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1218,
1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234,
1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250,
1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266,
1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282,
1323, 1324, 1329, 1330, 1334, 1335, 1350, 1351, 1352, 1353, 1365, 1367, 1370, 1371, 1373, 1374,
1375, 1376, 1377, 1395, 1396, 1397, 1402, 1414, 1432, 1433, 1435, 1436, 1490, 1491, 1492, 1493,
1494, 1496, 1497, 1514, 1537, 1538, 1539, 1543, 1544, 1545, 1546, 1561, 1569, 1570, 1571, 1574,
1575, 1576, 1577, 1588, 1591, 1610, 1611, 1616, 1617, 1618, 1619, 1620, 1621, 1626, 1627, 1628,
1629, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1654, 1655, 1662, 1663, 1666, 1667, 1676, 1677,
1685, 1686, 1689, 1690, 1691, 1692
]
def filter_by_len(ds, sequence_length):
@seqio.map_over_dataset
def tokenize(ex):
voc = get_default_vocabulary()
ex["text_inputs"] = voc.encode_tf(ex["text_inputs"])
ex["text_targets"] = voc.encode_tf(ex["text_targets"])
return ex
ds = tokenize(ds)
def _filter(ex):
# Leave one space for EOS
return (
(len(ex["text_inputs"]) <= sequence_length["text_inputs"] - 1) and
(len(ex["text_targets"]) <= sequence_length["text_targets"] - 1)
)
return ds.filter(_filter)
@seqio.map_over_dataset
def tokenize_with_truncate(x, sequence_length):
"""Tokenize x but truncate from the special TRUNCATE symbol not the end"""
voc = get_default_vocabulary()
text_inputs = x["text_inputs"]
parts = tf.strings.split(text_inputs, TRUNCATE, maxsplit=2)
if tf.shape(parts)[0] == 1:
x["text_inputs_pretokenized"] = text_inputs
x["text_inputs"] = voc.encode_tf(parts[0])
else:
x["text_inputs_pretokenized"] = tf.strings.join([parts[0], parts[1]], "")
to_truncate = voc.encode_tf(parts[0])
suffix = voc.encode_tf(parts[1])
max_input_len = sequence_length["text_inputs"]
n = max_input_len - tf.shape(suffix)[0] - 1 # -1 for the EOS
x["text_inputs"] = tf.concat([to_truncate[:n], suffix], 0)
return x
def filter_non_english(ds, source):
if source == "NIv2":
def _fn(ex):
return not tf.strings.regex_full_match(ex["task_name"], f"task({'|'.join(str(x) for x in NI_NON_ENGLISH_TASKS)})_.*")
elif source == "Flan2021":
def _fn(ex):
return not tf.strings.regex_full_match(ex["task_name"], "(wmt[0-9]*_.*)|para_crawl_enes")
else:
return ds
return ds.filter(_fn)
@seqio.map_over_dataset
def preprocess_flan(ex, name):
return dict(
text_inputs=tf.strings.join(["[Text] [S] ", ex["inputs"]]),
text_targets=ex["targets"],
example_id=tf.strings.join([name, tf.strings.as_string(ex["example_num"])], "-")
)
def add_flan(name):
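  # registers one FLAN sub-collection as a seqio Task; the first 2000 training examples are held out as a validation split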
full_name = f"flan2_{name.lower()}"
TaskRegistry.add(
full_name,
source=seqio.TfdsDataSource(
tfds_name=f"{full_name}:1.0.0",
tfds_data_dir=MULTITASK_TFDS_DATA_DIR,
splits={
"train": "train[2000:]",
"validation": "train[:2000]"
}
),
preprocessors=[
functools.partial(filter_non_english, source=name),
functools.partial(preprocess_flan, name=full_name),
filter_by_len,
unified_io_preprocessor,
],
output_features=OUTPUT_FEATURES,
)
FLAN_DATASETS = ["Flan2021", "T0", "NIv2", "CoT", "Dialog"]
for dataset in FLAN_DATASETS:
add_flan(dataset)
# Weights from https://github.com/google-research/FLAN/blob/main/flan/v2/run_example.py#L65-L73
# git commit 7b33ac0
seqio.MixtureRegistry.add(
'flan2',
tasks=[
('flan2_flan2021', 0.4), # mixing weight = 40%
('flan2_t0', 0.32), # mixing weight = 32%
('flan2_niv2', 0.2), # mixing weight = 20%
('flan2_cot', 0.05), # mixing weight = 5%
('flan2_dialog', 0.03), # mixing weight = 3%
])
def preprocess_instruction_context(ds, dataset_name): | context_prompts = Prompt().get_prompt_list("NLP Instruction Context", dataset_name) | 4 | 2023-12-12 20:23:33+00:00 | 8k |
SafeAILab/EAGLE | evaluation/gen_ea_answer_vicuna.py | [
{
"identifier": "EaModel",
"path": "model/ea_model.py",
"snippet": "class EaModel(nn.Module):\n\n def __init__(\n self,\n base_model,\n base_model_name_or_path,\n ea_model_path,\n ):\n\n super().__init__()\n self.base_model = base_model\n self.config = base_model.config\n self.hidden_size = base_model.lm_head.weight.shape[-1]\n self.vocab_size = base_model.lm_head.weight.shape[0]\n self.base_model_name_or_path = base_model_name_or_path\n self.tokenizer = AutoTokenizer.from_pretrained(self.base_model_name_or_path)\n config = EConfig.from_pretrained(ea_model_path)\n self.ea_layer = Model(config)\n\n low_memory=False\n\n device = base_model.model.layers[-1].self_attn.q_proj.weight.device\n if device!=base_model.lm_head.weight.device:\n self.ea_layer.diff_device = True\n if not low_memory:\n # self.ea_layer.head=nn.Linear(base_model.lm_head.in_features,base_model.lm_head.out_features,bias=False)\n # self.ea_layer.head.weight=copy.deepcopy(base_model.lm_head.weight)\n # self.ea_layer.head.to(device)\n self.ea_layer.headweight = base_model.lm_head.weight.clone().to(device)\n else:\n self.ea_layer.layer_device = device\n\n else:\n self.ea_layer.diff_device = False\n self.ea_layer.to(self.base_model.dtype).to(device)\n self.ea_layer.init_tree()\n\n def get_tokenizer(self):\n \"\"\"Get the tokenizer of the base model.\n\n Returns:\n Tokenizer: The tokenizer of the base model.\n \"\"\"\n return self.tokenizer\n\n @classmethod\n def from_pretrained(\n cls,\n base_model_path=None,\n ea_model_path=None,\n **kwargs,\n ):\n\n base_model = KVLlamaForCausalLM.from_pretrained(\n base_model_path, **kwargs\n )\n\n configpath=os.path.join(ea_model_path,\"config.json\")\n if not os.path.exists(configpath):\n configpath = hf_hub_download(ea_model_path, \"config.json\")\n model = cls(\n base_model,\n base_model_path,\n configpath\n )\n load_model_path=os.path.join(ea_model_path, \"pytorch_model.bin\")\n if not os.path.exists(load_model_path):\n load_model_path=hf_hub_download(ea_model_path, \"pytorch_model.bin\")\n ea_layer_state_dict = torch.load(load_model_path,\n map_location=base_model.device)\n model.ea_layer.load_state_dict(ea_layer_state_dict, strict=True)\n\n return model\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n labels=None,\n past_key_values=None,\n output_orig=False,\n position_ids=None,\n init=True,\n logits_processor=None\n ):\n\n with torch.inference_mode():\n # Pass input through the base model\n outputs = self.base_model.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n past_key_values=past_key_values,\n position_ids=position_ids,\n )\n if output_orig:\n orig = self.base_model.lm_head(outputs[0])\n hidden_states = outputs[0].clone()\n if init:\n if logits_processor is not None:\n logits = orig[:, -1]\n logits = logits_processor(None, logits)\n probabilities = torch.nn.functional.softmax(logits, dim=1)\n token = torch.multinomial(probabilities, 1)\n else:\n token = torch.argmax(orig[:, -1])\n token = token[None, None]\n input_ids = torch.cat((input_ids, token.to(input_ids.device)), dim=1)\n # Clone the output hidden states\n\n ea_logits = self.ea_layer.topK_genrate(hidden_states, input_ids, self.base_model.lm_head, logits_processor)\n if output_orig:\n return ea_logits, outputs, orig, hidden_states, token\n return ea_logits, hidden_states, token\n else:\n if output_orig:\n return outputs, orig, hidden_states\n\n @torch.no_grad()\n def eagenerate(\n self,\n input_ids,\n temperature=0.0,\n top_p=0.0,\n top_k=0.0,\n max_new_tokens=512,\n max_length=2048,\n tree_choices=mc_sim_7b_63,\n\n ):\n if temperature > 1e-5:\n 
logits_processor = prepare_logits_processor(temperature=temperature, top_p=top_p, top_k=top_k)\n else:\n logits_processor = None\n assert input_ids.shape[0] == 1, \"Only support batch size 1 for now!!\"\n # Avoid modifying the input_ids in-place\n input_ids = input_ids.clone()\n self.ea_layer.reset_kv()\n\n if hasattr(self, \"tree_choices\") and self.tree_choices == tree_choices:\n tree_buffers = self.tree_buffers\n else:\n tree_buffers = generate_tree_buffers(\n tree_choices, device=self.base_model.model.layers[-1].self_attn.q_proj.weight.device\n )\n tree_buffers[\"retrieve_indices_head\"] = tree_buffers[\"retrieve_indices\"].to(\n self.base_model.lm_head.weight.device)\n self.tree_buffers = tree_buffers\n self.tree_choices = tree_choices\n\n # Initialize the past key and value states\n if hasattr(self, \"past_key_values\"):\n past_key_values = self.past_key_values\n past_key_values_data = self.past_key_values_data\n current_length_data = self.current_length_data\n # Reset the past key and value states\n current_length_data.zero_()\n else:\n (\n past_key_values,\n past_key_values_data,\n current_length_data,\n ) = initialize_past_key_values(self.base_model)\n self.past_key_values = past_key_values\n self.past_key_values_data = past_key_values_data\n self.current_length_data = current_length_data\n\n input_len = input_ids.shape[1]\n reset_tree_mode(self)\n tree_logits, logits, hidden_state, sample_token = initialize_tree(\n input_ids, self, tree_buffers[\"tree_attn_mask\"], past_key_values, logits_processor\n )\n new_token = 0\n\n for idx in range(max_length):\n candidates, cart_candidates_prob, tree_candidates = generate_candidates(\n tree_logits,\n tree_buffers[\"tree_indices\"],\n tree_buffers[\"retrieve_indices\"],\n sample_token,\n logits_processor\n )\n logits, hidden_state_new, outputs = tree_decoding(\n self,\n tree_candidates,\n past_key_values,\n tree_buffers[\"tree_position_ids\"],\n input_ids,\n tree_buffers[\"retrieve_indices_head\"],\n )\n best_candidate, accept_length, sample_p = evaluate_posterior(\n logits, candidates, logits_processor, cart_candidates_prob, tree_logits[2], tree_buffers[\"p_indices\"],\n tree_candidates, tree_buffers[\"b_indices\"]\n )\n input_ids, tree_logits, new_token, hidden_state, sample_token = update_inference_inputs(\n input_ids,\n candidates,\n best_candidate,\n accept_length,\n tree_buffers[\"retrieve_indices\"],\n logits_processor,\n logits,\n tree_logits,\n new_token,\n past_key_values_data,\n current_length_data,\n self,\n hidden_state,\n hidden_state_new,\n sample_p\n )\n\n if self.tokenizer.eos_token_id in input_ids[0, input_len:].tolist():\n return input_ids\n if new_token > max_new_tokens:\n return input_ids\n if input_ids.shape[1] > max_length:\n return input_ids\n\n @torch.no_grad()\n def ea_generate(\n self,\n input_ids,\n temperature=0.0,\n top_p=0.0,\n top_k=0.0,\n max_steps=512,\n tree_choices=mc_sim_7b_63,\n\n ):\n if temperature > 1e-5:\n logits_processor = prepare_logits_processor(temperature=temperature, top_p=top_p, top_k=top_k)\n else:\n logits_processor = None\n assert input_ids.shape[0] == 1, \"Only support batch size 1 for now!!\"\n # Avoid modifying the input_ids in-place\n input_ids = input_ids.clone()\n self.ea_layer.reset_kv()\n\n if hasattr(self, \"tree_choices\") and self.tree_choices == tree_choices:\n tree_buffers = self.tree_buffers\n else:\n tree_buffers = generate_tree_buffers(\n tree_choices, device=self.base_model.model.layers[-1].self_attn.q_proj.weight.device\n )\n tree_buffers[\"retrieve_indices_head\"] = 
tree_buffers[\"retrieve_indices\"].to(\n self.base_model.lm_head.weight.device)\n self.tree_buffers = tree_buffers\n self.tree_choices = tree_choices\n\n # Initialize the past key and value states\n if hasattr(self, \"past_key_values\"):\n past_key_values = self.past_key_values\n past_key_values_data = self.past_key_values_data\n current_length_data = self.current_length_data\n # Reset the past key and value states\n current_length_data.zero_()\n else:\n (\n past_key_values,\n past_key_values_data,\n current_length_data,\n ) = initialize_past_key_values(self.base_model)\n self.past_key_values = past_key_values\n self.past_key_values_data = past_key_values_data\n self.current_length_data = current_length_data\n\n input_len = input_ids.shape[1]\n reset_tree_mode(self)\n tree_logits, logits, hidden_state, sample_token = initialize_tree(\n input_ids, self, tree_buffers[\"tree_attn_mask\"], past_key_values, logits_processor\n )\n new_token = 0\n\n for idx in range(max_steps):\n candidates, cart_candidates_prob, tree_candidates = generate_candidates(\n tree_logits,\n tree_buffers[\"tree_indices\"],\n tree_buffers[\"retrieve_indices\"],\n sample_token,\n logits_processor\n )\n logits, hidden_state_new, outputs = tree_decoding(\n self,\n tree_candidates,\n past_key_values,\n tree_buffers[\"tree_position_ids\"],\n input_ids,\n tree_buffers[\"retrieve_indices_head\"],\n )\n best_candidate, accept_length, sample_p = evaluate_posterior(\n logits, candidates, logits_processor, cart_candidates_prob, tree_logits[2], tree_buffers[\"p_indices\"],\n tree_candidates, tree_buffers[\"b_indices\"]\n )\n input_ids, tree_logits, new_token, hidden_state, sample_token = update_inference_inputs(\n input_ids,\n candidates,\n best_candidate,\n accept_length,\n tree_buffers[\"retrieve_indices\"],\n logits_processor,\n logits,\n tree_logits,\n new_token,\n past_key_values_data,\n current_length_data,\n self,\n hidden_state,\n hidden_state_new,\n sample_p\n )\n\n yield input_ids\n\n if self.tokenizer.eos_token_id in input_ids[0, input_len:].tolist():\n break\n if new_token > 1024:\n break\n if input_ids.shape[1] > 1960:\n break\n\n @torch.no_grad()\n def naive_generate(\n self,\n input_ids,\n temperature=0.0,\n top_p=0.0,\n top_k=0.0,\n max_steps=512,\n tree_choices=mc_sim_7b_63,\n\n ):\n if temperature > 1e-5:\n logits_processor = prepare_logits_processor(temperature=temperature, top_p=top_p, top_k=top_k)\n else:\n logits_processor = None\n assert input_ids.shape[0] == 1, \"Only support batch size 1 for now!!\"\n # Avoid modifying the input_ids in-place\n input_ids = input_ids.clone()\n self.ea_layer.reset_kv()\n\n if hasattr(self, \"tree_choices\") and self.tree_choices == tree_choices:\n tree_buffers = self.tree_buffers\n else:\n tree_buffers = generate_tree_buffers(\n tree_choices, device=self.base_model.model.layers[-1].self_attn.q_proj.weight.device\n )\n tree_buffers[\"retrieve_indices_head\"] = tree_buffers[\"retrieve_indices\"].to(\n self.base_model.lm_head.weight.device)\n self.tree_buffers = tree_buffers\n self.tree_choices = tree_choices\n\n # Initialize the past key and value states\n if hasattr(self, \"past_key_values\"):\n past_key_values = self.past_key_values\n past_key_values_data = self.past_key_values_data\n current_length_data = self.current_length_data\n # Reset the past key and value states\n current_length_data.zero_()\n else:\n (\n past_key_values,\n past_key_values_data,\n current_length_data,\n ) = initialize_past_key_values(self.base_model)\n self.past_key_values = past_key_values\n 
self.past_key_values_data = past_key_values_data\n self.current_length_data = current_length_data\n\n input_len = input_ids.shape[1]\n reset_tree_mode(self)\n outputs = self.base_model(input_ids, past_key_values=past_key_values, use_cache=True)\n new_token = 0\n\n for idx in range(max_steps):\n input_id = outputs.logits[:, -1:].argmax(dim=-1)\n outputs = self.base_model(input_id, use_cache=True, past_key_values=past_key_values)\n input_ids = torch.cat([input_ids, input_id], dim=-1)\n\n yield input_ids\n\n if self.tokenizer.eos_token_id in input_ids[0, input_len:].tolist():\n break\n if new_token > 1024:\n break\n if input_ids.shape[1] > 1960:\n break"
},
{
"identifier": "initialize_past_key_values",
"path": "model/kv_cache.py",
"snippet": "def initialize_past_key_values(model):\n \"\"\"\n Initialize past key and value states for a given transformer model.\n\n This function prepares key-value cache structures for the model, allowing it to store and reuse\n past key and value states during autoregressive decoding, which can improve efficiency.\n\n Args:\n model (nn.Module): The transformer model for which past key-value states need to be initialized.\n\n Returns:\n tuple:\n - past_key_values (list): A list of KVCache objects for each layer in the model.\n - past_key_values_data (torch.Tensor): The tensor that will store all keys and values.\n - current_length_data (torch.Tensor): A tensor tracking the current length of keys/values in the cache.\n \"\"\"\n # Extracting configuration from the model\n config = model.config\n # Initializing the batch size to 1, this can be modified if different batch sizes are required\n batch_size = 1\n # Initializing a tensor to store past keys and values for all layers\n\n devices=[]\n for i in range(config.num_hidden_layers):\n try:\n device = model.model.layers[i].self_attn.q_proj.weight.device\n except:\n device=model.layers[i].self_attn.q_proj.weight.device\n devices.append(device)\n past_key_values_data_list=[]\n startnum=0\n startdevice=devices[0]\n for id,i in enumerate(devices):\n if startdevice!=i:\n past_key_values_data = torch.zeros(\n startnum * 2,\n batch_size,\n config.num_key_value_heads,\n config.max_position_embeddings,\n config.hidden_size // config.num_attention_heads,\n device=startdevice,\n dtype=model.dtype,\n )\n past_key_values_data_list.append(past_key_values_data)\n startdevice = i\n startnum=0\n startnum += 1\n past_key_values_data = torch.zeros(\n startnum * 2,\n batch_size,\n config.num_key_value_heads,\n config.max_position_embeddings,\n config.hidden_size // config.num_attention_heads,\n device=startdevice,\n dtype=model.dtype,\n )\n past_key_values_data_list.append(past_key_values_data)\n # Initialize tensor to store the current length of the cached data for all layers.\n # [IMPORTANT] It needs to be kept on CPU for quick access and updates.\n current_length_data = torch.zeros(\n config.num_hidden_layers * 2, dtype=torch.long, device=\"cpu\"\n )\n # Creating a KVCache for each pair of key and value in all layers\n past_key_values = [] * config.num_hidden_layers\n\n bias=0\n start_data_m=devices[0].index\n for i in range(config.num_hidden_layers):\n data_m=devices[i].index\n if data_m!=start_data_m:\n bias=0\n start_data_m=data_m\n past_key_values.append(\n [\n KVCache(past_key_values_data_list[data_m-devices[0].index][2*bias + j], current_length_data[i * 2 + j])\n for j in range(2)\n ]\n )\n bias+=1\n return past_key_values, past_key_values_data_list, current_length_data"
}
] | import argparse
import json
import os
import time
import shortuuid
import ray
from fastchat.llm_judge.common import load_questions
from fastchat.model import get_conversation_template
from tqdm import tqdm
from model.ea_model import EaModel
from model.kv_cache import initialize_past_key_values
from model.utils import *
from model.choices import * | 5,371 |
for idx in range(max_steps):
candidates, cart_candidates_prob, tree_candidates = generate_candidates(
tree_logits,
tree_buffers["tree_indices"],
tree_buffers["retrieve_indices"],
sample_token,
logits_processor
)
logits, hidden_state_new, outputs = tree_decoding(
model,
tree_candidates,
past_key_values,
tree_buffers["tree_position_ids"],
input_ids,
tree_buffers["retrieve_indices_head"],
)
best_candidate, accept_length, sample_p = evaluate_posterior(
logits, candidates, logits_processor, cart_candidates_prob, tree_logits[2], tree_buffers["p_indices"],
tree_candidates, tree_buffers["b_indices"]
)
input_ids, tree_logits, new_token, hidden_state, sample_token = update_inference_inputs(
input_ids,
candidates,
best_candidate,
accept_length,
tree_buffers["retrieve_indices"],
logits_processor,
logits,
tree_logits,
new_token,
past_key_values_data,
current_length_data,
model,
hidden_state,
hidden_state_new,
sample_p
)
if tokenizer.eos_token_id in input_ids[0, input_len:].tolist():
break
if new_token > 1024:
break
if input_ids.shape[1] > 1960:
break
return input_ids, new_token, idx
def run_eval(
base_model_path,
ea_model_path,
model_id,
question_file,
question_begin,
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
num_gpus_total,
max_gpu_memory,
temperature,
tree_choices,
):
questions = load_questions(question_file, question_begin, question_end)
    # randomly shuffle the questions to balance the loading
# random.shuffle(questions)
shuffled_ids = [q["question_id"] for q in questions]
# with open(f"data/{args.bench_name}/model_ids/{args.model_id}.shuffled_ids", "w") as fout:
# json.dump(shuffled_ids, fout)
# Split the question file into `num_gpus` files
assert num_gpus_total % num_gpus_per_model == 0
use_ray = num_gpus_total // num_gpus_per_model > 1
if use_ray:
get_answers_func = ray.remote(num_gpus=num_gpus_per_model)(
get_model_answers
).remote
else:
get_answers_func = get_model_answers
chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model) # // 2
ans_handles = []
for i in range(0, len(questions), chunk_size):
ans_handles.append(
get_answers_func(
base_model_path,
ea_model_path,
model_id,
questions[i: i + chunk_size],
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
temperature,
tree_choices,
)
)
if use_ray:
ray.get(ans_handles)
@torch.inference_mode()
def get_model_answers(
base_model_path,
ea_model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
temperature,
tree_choices,
):
# temperature = 0.0
| """Generate answers with local models.
Usage:
python3 gen_model_answer.py --model-path lmsys/fastchat-t5-3b-v1.0 --model-id fastchat-t5-3b-v1.0
"""
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
def ea_forward(input_ids, model, tokenizer, tree_choices, logits_processor=None, max_steps=512):
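    # EAGLE decoding loop: the EA head drafts a tree of candidate tokens, the base model verifies them in one pass, and the longest accepted prefix is kept each step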
assert input_ids.shape[0] == 1, "Only support batch size 1 for now!!"
# Avoid modifying the input_ids in-place
input_ids = input_ids.clone()
model.ea_layer.reset_kv()
if hasattr(model, "tree_choices") and model.tree_choices == tree_choices:
tree_buffers = model.tree_buffers
else:
tree_buffers = generate_tree_buffers(
tree_choices, device=model.base_model.model.layers[-1].self_attn.q_proj.weight.device
)
tree_buffers["retrieve_indices_head"] = tree_buffers["retrieve_indices"].to(
model.base_model.lm_head.weight.device)
model.tree_buffers = tree_buffers
model.tree_choices = tree_choices
# Initialize the past key and value states
if hasattr(model, "past_key_values"):
past_key_values = model.past_key_values
past_key_values_data = model.past_key_values_data
current_length_data = model.current_length_data
# Reset the past key and value states
current_length_data.zero_()
else:
(
past_key_values,
past_key_values_data,
current_length_data,
) = initialize_past_key_values(model.base_model)
model.past_key_values = past_key_values
model.past_key_values_data = past_key_values_data
model.current_length_data = current_length_data
input_len = input_ids.shape[1]
reset_tree_mode(model)
tree_logits, logits, hidden_state, sample_token = initialize_tree(
input_ids, model, tree_buffers["tree_attn_mask"], past_key_values, logits_processor
)
new_token = 0
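    # each step: expand the draft tree into candidate sequences, score them with tree attention, then accept the best verified prefix and update the KV cache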
for idx in range(max_steps):
candidates, cart_candidates_prob, tree_candidates = generate_candidates(
tree_logits,
tree_buffers["tree_indices"],
tree_buffers["retrieve_indices"],
sample_token,
logits_processor
)
logits, hidden_state_new, outputs = tree_decoding(
model,
tree_candidates,
past_key_values,
tree_buffers["tree_position_ids"],
input_ids,
tree_buffers["retrieve_indices_head"],
)
best_candidate, accept_length, sample_p = evaluate_posterior(
logits, candidates, logits_processor, cart_candidates_prob, tree_logits[2], tree_buffers["p_indices"],
tree_candidates, tree_buffers["b_indices"]
)
input_ids, tree_logits, new_token, hidden_state, sample_token = update_inference_inputs(
input_ids,
candidates,
best_candidate,
accept_length,
tree_buffers["retrieve_indices"],
logits_processor,
logits,
tree_logits,
new_token,
past_key_values_data,
current_length_data,
model,
hidden_state,
hidden_state_new,
sample_p
)
if tokenizer.eos_token_id in input_ids[0, input_len:].tolist():
break
if new_token > 1024:
break
if input_ids.shape[1] > 1960:
break
return input_ids, new_token, idx
def run_eval(
base_model_path,
ea_model_path,
model_id,
question_file,
question_begin,
question_end,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
num_gpus_total,
max_gpu_memory,
temperature,
tree_choices,
):
questions = load_questions(question_file, question_begin, question_end)
    # randomly shuffle the questions to balance the loading
# random.shuffle(questions)
shuffled_ids = [q["question_id"] for q in questions]
# with open(f"data/{args.bench_name}/model_ids/{args.model_id}.shuffled_ids", "w") as fout:
# json.dump(shuffled_ids, fout)
# Split the question file into `num_gpus` files
assert num_gpus_total % num_gpus_per_model == 0
use_ray = num_gpus_total // num_gpus_per_model > 1
if use_ray:
get_answers_func = ray.remote(num_gpus=num_gpus_per_model)(
get_model_answers
).remote
else:
get_answers_func = get_model_answers
chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model) # // 2
ans_handles = []
for i in range(0, len(questions), chunk_size):
ans_handles.append(
get_answers_func(
base_model_path,
ea_model_path,
model_id,
questions[i: i + chunk_size],
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
temperature,
tree_choices,
)
)
if use_ray:
ray.get(ans_handles)
@torch.inference_mode()
def get_model_answers(
base_model_path,
ea_model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
temperature,
tree_choices,
):
# temperature = 0.0
| model = EaModel.from_pretrained( | 0 | 2023-12-07 19:08:39+00:00 | 8k |
zju3dv/EasyVolcap | scripts/torchxpbd/extract_unclothed_body.py | [
{
"identifier": "export_mesh",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def export_mesh(verts: torch.Tensor, faces: torch.Tensor, uv: torch.Tensor = None, img: torch.Tensor = None, uvfaces: torch.Tensor = None, colors: torch.Tensor = None, normals: torch.Tensor = None, filename: str = \"default.ply\", subdivision=0):\n dirname = os.path.dirname(filename)\n if dirname: os.makedirs(dirname, exist_ok=True)\n\n if subdivision > 0:\n from easyvolcap.utils.mesh_utils import face_normals, loop_subdivision\n verts, faces = loop_subdivision(verts, faces, subdivision)\n\n if filename.endswith('.npz'):\n def collect_args(**kwargs): return kwargs\n kwargs = collect_args(verts=verts, faces=faces, uv=uv, img=img, uvfaces=uvfaces, colors=colors, normals=normals)\n ret = dotdict({k: v for k, v in kwargs.items() if v is not None})\n export_dotdict(ret, filename)\n\n elif filename.endswith('.ply') or filename.endswith('.obj'):\n if uvfaces is None:\n mesh = get_mesh(verts, faces, uv, img, colors, normals, filename)\n mesh.export(filename)\n else:\n from pytorch3d.io import save_obj\n verts, faces, uv, img, uvfaces = get_tensor_mesh_data(verts, faces, uv, img, uvfaces)\n save_obj(filename, verts, faces, verts_uvs=uv, faces_uvs=uvfaces, texture_map=img)\n else:\n raise NotImplementedError(f'Unrecognized input format for: {filename}')"
},
{
"identifier": "load_mesh",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def load_mesh(filename: str, device='cuda', load_uv=False, load_aux=False, backend='pytorch3d'):\n from pytorch3d.io import load_ply, load_obj\n if backend == 'trimesh':\n import trimesh\n mesh: trimesh.Trimesh = trimesh.load(filename)\n return mesh.vertices, mesh.faces\n\n vm, fm = None, None\n if filename.endswith('.npz'):\n mesh = np.load(filename)\n v = torch.from_numpy(mesh['verts'])\n f = torch.from_numpy(mesh['faces'])\n\n if load_uv:\n vm = torch.from_numpy(mesh['uvs'])\n fm = torch.from_numpy(mesh['uvfaces'])\n else:\n if filename.endswith('.ply'):\n v, f = load_ply(filename)\n elif filename.endswith('.obj'):\n v, faces_attr, aux = load_obj(filename)\n f = faces_attr.verts_idx\n\n if load_uv:\n vm = aux.verts_uvs\n fm = faces_attr.textures_idx\n else:\n raise NotImplementedError(f'Unrecognized input format for: {filename}')\n\n v = v.to(device, non_blocking=True).contiguous()\n f = f.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n vm = vm.to(device, non_blocking=True).contiguous()\n fm = fm.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n if load_aux:\n return v, f, vm, fm, aux\n else:\n return v, f, vm, fm\n else:\n return v, f"
},
{
"identifier": "laplacian_smoothing",
"path": "easyvolcap/utils/mesh_utils.py",
"snippet": "def laplacian_smoothing(v: torch.Tensor, e: torch.Tensor, inds: torch.Tensor = None, alpha=0.33, iter=90):\n for i in range(iter):\n # 1st gaussian smoothing pass\n L = laplacian(v, e)\n vln = L @ v\n if inds is None:\n v += alpha * vln\n else:\n v[inds] += alpha * vln[inds]\n\n # 2nd gaussian smoothing pass\n L = laplacian(v, e)\n vln = L @ v\n if inds is None:\n v += -(alpha + 0.01) * vln\n else:\n v[inds] += -(alpha + 0.01) * vln[inds]\n return v"
},
{
"identifier": "hierarchical_winding_distance_remesh",
"path": "easyvolcap/utils/mesh_utils.py",
"snippet": "def hierarchical_winding_distance_remesh(\n verts: torch.Tensor,\n faces: torch.Tensor,\n init_voxel_size=0.05, # 5cm voxels\n init_dist_th_verts=1.0, # 50cm hole range\n init_dist_th_tris=0.25, # 50cm hole range\n steps=4,\n **kwargs,\n):\n guide_verts, guide_faces = verts, faces\n voxel_size, dist_th_verts, dist_th_tris = init_voxel_size, init_dist_th_verts, init_dist_th_tris\n decay = np.power(dist_th_tris / (voxel_size / 2**(steps - 2)), 1 / (steps - 1)) if steps > 1 else -1\n # log(decay)\n for i in range(int(steps)):\n guide_verts, guide_faces = winding_distance_remesh(verts, faces, guide_verts, guide_faces, voxel_size, dist_th_verts, dist_th_tris, **kwargs)\n voxel_size, dist_th_verts, dist_th_tris = voxel_size / 2, dist_th_verts / decay, dist_th_tris / decay\n\n return guide_verts, guide_faces"
},
{
"identifier": "get_edges",
"path": "easyvolcap/utils/mesh_utils.py",
"snippet": "def get_edges(faces: torch.Tensor):\n V = faces.max()\n F = faces.shape[0]\n HE = F * 3\n\n # create halfedges\n v0, v1, v2 = faces.chunk(3, dim=-1)\n e01 = torch.cat([v0, v1], dim=-1) # (sum(F_n), 2)\n e12 = torch.cat([v1, v2], dim=-1) # (sum(F_n), 2)\n e20 = torch.cat([v2, v0], dim=-1) # (sum(F_n), 2)\n\n # stores the vertex indices for each half edge\n e = torch.empty(HE, 2, device=faces.device, dtype=faces.dtype)\n e[0::3] = e01\n e[1::3] = e12\n e[2::3] = e20\n vert = e[..., 0] # HE, :record starting half edge\n vert_next = e[..., 1]\n\n edges = torch.stack([torch.minimum(vert_next, vert), torch.maximum(vert_next, vert)], dim=-1)\n hash = V * edges[..., 0] + edges[..., 1] # HE, 2, contains edge hash, should be unique\n u, i, c = hash.unique(sorted=False, return_inverse=True, return_counts=True)\n\n e = torch.stack([u // V, u % V], dim=1)\n return e, i, c"
},
{
"identifier": "adjacency",
"path": "easyvolcap/utils/mesh_utils.py",
"snippet": "def adjacency(verts: torch.Tensor, edges: torch.Tensor):\n V = verts.shape[0]\n\n e0, e1 = edges.unbind(1)\n\n idx01 = torch.stack([e0, e1], dim=1) # (E, 2)\n idx10 = torch.stack([e1, e0], dim=1) # (E, 2)\n idx = torch.cat([idx01, idx10], dim=0).t() # (2, 2*E)\n\n # First, we construct the adjacency matrix,\n # i.e. A[i, j] = 1 if (i,j) is an edge, or\n # A[e0, e1] = 1 & A[e1, e0] = 1\n ones = torch.ones(idx.shape[1], dtype=torch.float32, device=verts.device)\n # pyre-fixme[16]: Module `sparse` has no attribute `FloatTensor`.\n A = torch.sparse.FloatTensor(idx, ones, (V, V))\n return A"
},
{
"identifier": "winding_number_nooom",
"path": "easyvolcap/utils/mesh_utils.py",
"snippet": "def winding_number_nooom(pts: torch.Tensor, verts: torch.Tensor, faces: torch.Tensor, quota_GB=15.0):\n # allocate chunk size to avoid oom when computing winding number\n faces_cnt_shape = faces.shape[:-1]\n faces_cnt = reduce(lambda x, y: x * y, faces_cnt_shape)\n quota_B = quota_GB * 2 ** 30 # GB -> B\n chunk = int(quota_B / (faces_cnt * winding_number.constant)) # quota = tris_cnt * pts_cnt * winding_number.constant\n\n # compute winding_number_distance on GPU and store results on CPU\n winding = []\n for i in tqdm(range(0, pts.shape[-2], chunk)):\n pts_chunk = pts[..., i:i + chunk, :]\n winding_chunk = winding_number(pts_chunk, verts, faces)\n winding.append(winding_chunk)\n winding = torch.cat(winding, dim=-1)\n\n return winding"
},
{
"identifier": "segment_mesh",
"path": "easyvolcap/utils/mesh_utils.py",
"snippet": "def segment_mesh(verts: torch.Tensor,\n faces: torch.Tensor,\n vs: torch.Tensor,\n inds: torch.Tensor,\n smoothing: str = 'mesh',\n dilate=0,\n ):\n # prepare vertex semantics\n vs_bits = 1 << vs\n\n # prepare indices of semantics to preserve\n inds_bits = (1 << inds).sum() # no repeatition\n\n # prepare faces to be preserved\n vm = (inds_bits & vs_bits) != 0\n\n # dilate the vertex mask along\n if dilate < 0:\n vm = ~vm\n vm = vm.float()\n edges, i, count = get_edges(faces)\n A = adjacency(verts, edges)\n for i in range(abs(dilate)):\n vm = A @ vm\n vm = vm.bool()\n if dilate < 0:\n vm = ~vm\n\n # extract face mask\n tm = multi_gather_tris(vm, faces, dim=-1) # F, 3\n fm = tm.sum(dim=-1) != 0\n\n # create the extracted mesh\n f, vm = unmerge_faces(faces[fm])\n v = verts[vm]\n\n # perform laplacian smoothing on edge vertices / faces (or maybe just the whole mesh to acquire a better shape?)\n e, i, c = get_edges(f)\n if smoothing == 'edge':\n svi = e[c != 2].ravel().unique() # selected vertices' indices\n else:\n svi = None\n v = laplacian_smoothing(v, e, svi)\n\n # Fill single triangle and single quad holes in the current mesh. Will remove non-manifold vertices maybe?\n mesh = trimesh.Trimesh(v.detach().cpu().numpy(), f.detach().cpu().numpy())\n mesh.fill_holes()\n v, f = mesh.vertices, mesh.faces\n\n # Convert back to torch.Tensor\n v, f = torch.tensor(v, device=verts.device, dtype=torch.float), torch.tensor(f, device=verts.device, dtype=torch.long)\n\n return v, f"
},
{
"identifier": "bidirectional_icp_fitting",
"path": "easyvolcap/utils/mesh_utils.py",
"snippet": "def bidirectional_icp_fitting(v0: torch.Tensor,\n f0: torch.Tensor,\n v1: torch.Tensor,\n f1: torch.Tensor,\n lambda_smooth: int = 29,\n opt_iter: int = 500,\n ep_iter: int = 50,\n lr: float = 3e-2,\n boundary_focus: bool = True,\n dilate: int = 0,\n ):\n \"\"\"\n Robust Bidirectional Mesh Fitting\n TODO: Need to investigate why the meshes got stretched along the orthogonal of the normal direction\n \"\"\"\n\n if boundary_focus:\n # select vertices to focus optimization on\n e0, i0, c0 = get_edges(f0)\n e1, i1, c1 = get_edges(f1)\n svi0 = e0[c0 != 2].ravel().unique() # selected vertices' indices: boundary\n svi1 = e1[c1 != 2].ravel().unique() # selected vertices' indices: boundary\n\n # dialte the boundary edge selection\n vm0 = torch.zeros(v0.shape[:-1], device=v0.device, dtype=torch.bool)\n vm1 = torch.zeros(v1.shape[:-1], device=v1.device, dtype=torch.bool)\n vm0[svi0] = True\n vm1[svi1] = True\n A0 = adjacency(v0, e0)\n A1 = adjacency(v1, e1)\n vm0 = vm0.float()\n vm1 = vm1.float()\n for i in range(abs(dilate)):\n vm0 = A0 @ vm0\n vm1 = A1 @ vm1\n vm0 = vm0.bool()\n vm1 = vm1.bool()\n svi0 = vm0.nonzero(as_tuple=True)[0]\n svi1 = vm1.nonzero(as_tuple=True)[0]\n else:\n svi0, svi1 = None, None\n\n # assume no batch dim\n M0 = compute_matrix(v0, f0, lambda_smooth)\n M1 = compute_matrix(v1, f1, lambda_smooth)\n p0 = to_differential(M0, v0)\n p1 = to_differential(M1, v1)\n p0.requires_grad_()\n p1.requires_grad_()\n optim = AdamUniform([p0, p1], lr=lr)\n\n pbar = tqdm(range(opt_iter))\n for i in range(opt_iter):\n v0 = from_differential(M0, p0, 'Cholesky')\n v1 = from_differential(M1, p1, 'Cholesky')\n t0 = multi_gather_tris(v0, f0)\n t1 = multi_gather_tris(v1, f1)\n\n m = Meshes([v0, v1], [f0, f1])\n nv0, nv1 = m.verts_normals_list()\n nf0, nf1 = m.faces_normals_list()\n\n if svi0 is not None:\n v0, nv0 = v0[svi0], nv0[svi0]\n if svi1 is not None:\n v1, nv1 = v1[svi1], nv1[svi1]\n\n loss0 = icp_loss(v0, t1, nv0, nf1)\n loss1 = icp_loss(v1, t0, nv1, nf0)\n loss = loss0 + loss1\n\n optim.zero_grad(set_to_none=True)\n loss.backward()\n optim.step()\n\n pbar.update(1)\n if i % ep_iter == 0:\n pbar.write(f'bidirectional L2 loss: {loss.item():.5g}')\n\n v0 = from_differential(M0, p0.detach(), 'Cholesky')\n v1 = from_differential(M1, p1.detach(), 'Cholesky')\n return v0, v1"
},
{
"identifier": "loop_subdivision",
"path": "easyvolcap/utils/mesh_utils.py",
"snippet": "def loop_subdivision(v: torch.Tensor, f: torch.Tensor, steps=2):\n halfedge = triangle_to_halfedge(v, f)\n halfedge = multiple_halfedge_loop_subdivision(halfedge, steps)\n return halfedge_to_triangle(halfedge)"
},
{
"identifier": "semantic_dim",
"path": "easyvolcap/utils/sem_utils.py",
"snippet": "def color_to_semantic(schp: torch.Tensor, # B, H, W, 3\n palette: torch.Tensor, # 256, 3\n ):\ndef semantics_to_color(semantic: torch.Tensor, # V,\n palette: torch.Tensor, # 256, 3\n ):\ndef palette_to_index(sem: np.ndarray, semantic_dim=semantic_dim):\ndef palette_to_onehot(sem: np.ndarray, semantic_dim=semantic_dim):\ndef get_schp_palette(num_cls=256):\ndef get_schp_palette_tensor_float(num_cls=semantic_dim, device='cuda'):"
}
] | import torch
import argparse
import numpy as np
import sys
from easyvolcap.utils.data_utils import export_mesh, load_mesh
from easyvolcap.utils.mesh_utils import laplacian_smoothing, hierarchical_winding_distance_remesh, get_edges, adjacency, winding_number_nooom, segment_mesh, bidirectional_icp_fitting, loop_subdivision
from easyvolcap.utils.sem_utils import semantic_dim, semantic_list | 4,804 |
# fmt: off
sys.path.append('.')
# fmt: on
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--clothed_input', default='data/xuzhen36/talk/registration/deformation/semantic_mesh.npz')
parser.add_argument('--body_input', default='data/xuzhen36/talk/registration/deformation/semantic_smplh.npz')
parser.add_argument('--body_output', default='data/xuzhen36/talk/registration/deformation/body_mesh.ply')
parser.add_argument('--cloth_output', default='data/xuzhen36/talk/registration/deformation/cloth_mesh.ply')
parser.add_argument('--cloth_list', nargs='+', default=['upper_cloth'])
args = parser.parse_args()
# global arguments
device = 'cuda'
    # maybe perform subdivision beforehand? and use Catmull-Clark instead of the simple subdivision provided by trimesh
# https://onrendering.com/data/papers/catmark/HalfedgeCatmullClark.pdf
# https://github.com/jdupuy/HalfedgeCatmullClark
v0, f0 = load_mesh(args.clothed_input, device)
vs0 = torch.tensor(np.load(args.clothed_input)['verts_semantics'], device=v0.device)
i1 = list(map(lambda x: semantic_list.index(x), args.cloth_list))
i1 = torch.tensor(i1, device=v0.device, dtype=torch.long)
# segment based on vertex semantices
v, f = segment_mesh(v0, f0, vs0, i1, smoothing='edge')
v, f = loop_subdivision(v, f, 1)
# save the results
export_mesh(v, f, filename=args.cloth_output)
# extract body mesh
i0 = list(map(lambda x: semantic_list.index(x), [s for s in semantic_list if s not in args.cloth_list]))
i0 = torch.tensor(i0, device=v.device, dtype=torch.long)
v0, f0 = segment_mesh(v0, f0, vs0, i0, smoothing='edge', dilate=-1)
v0, f0 = loop_subdivision(v0, f0, 1)
v1, f1 = load_mesh(args.body_input, device)
vs1 = torch.tensor(np.load(args.body_input)['verts_semantics'], device=v.device)
v1, f1 = segment_mesh(v1, f1, vs1, i1, smoothing='edge', dilate=3)
v1, f1 = loop_subdivision(v1, f1, 2)
v0, v1 = bidirectional_icp_fitting(v0, f0, v1, f1)
level_set = 0.334
v2, f2 = torch.cat([v0, v1]), torch.cat([f0, f1+len(v0)])
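    # fuse the fitted body and cloth-region meshes, then remesh their union with a winding-number distance field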
# v, f = v2, f2
v, f = hierarchical_winding_distance_remesh(v2, f2, level_set=level_set)
# 0.334 will produce ripple effects on perfectly normal mesh if thresh of winding number is too low
# (1 - th) / 2 + (level_set - 0.5).abs() > 0.5 - maximum_error
# 0.5 - th / 2 + 0.5 - level_set - 0.5 > - maximum_error
# th / 2 + level_set - 0.5 > maximum_error
# 0.225 + 0.334 - 0.5 = 0.059 > maximum_error
# conditional laplacian smoothing
th = 0.45 # 1-max_error
wn = winding_number_nooom(v, v2, f2) # TODO: use grid sample from previous computation to make this faster
vm = (wn - level_set).abs() < (th / 2)
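    # mark vertices whose winding number lies close to the extraction level set; only these are smoothed below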
# compute edges
|
# fmt: off
sys.path.append('.')
# fmt: on
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--clothed_input', default='data/xuzhen36/talk/registration/deformation/semantic_mesh.npz')
parser.add_argument('--body_input', default='data/xuzhen36/talk/registration/deformation/semantic_smplh.npz')
parser.add_argument('--body_output', default='data/xuzhen36/talk/registration/deformation/body_mesh.ply')
parser.add_argument('--cloth_output', default='data/xuzhen36/talk/registration/deformation/cloth_mesh.ply')
parser.add_argument('--cloth_list', nargs='+', default=['upper_cloth'])
args = parser.parse_args()
# global arguments
device = 'cuda'
    # maybe perform subdivision beforehand? and use Catmull-Clark instead of the simple subdivision provided by trimesh
# https://onrendering.com/data/papers/catmark/HalfedgeCatmullClark.pdf
# https://github.com/jdupuy/HalfedgeCatmullClark
v0, f0 = load_mesh(args.clothed_input, device)
vs0 = torch.tensor(np.load(args.clothed_input)['verts_semantics'], device=v0.device)
i1 = list(map(lambda x: semantic_list.index(x), args.cloth_list))
i1 = torch.tensor(i1, device=v0.device, dtype=torch.long)
# segment based on vertex semantices
v, f = segment_mesh(v0, f0, vs0, i1, smoothing='edge')
v, f = loop_subdivision(v, f, 1)
# save the results
export_mesh(v, f, filename=args.cloth_output)
# extract body mesh
i0 = list(map(lambda x: semantic_list.index(x), [s for s in semantic_list if s not in args.cloth_list]))
i0 = torch.tensor(i0, device=v.device, dtype=torch.long)
v0, f0 = segment_mesh(v0, f0, vs0, i0, smoothing='edge', dilate=-1)
v0, f0 = loop_subdivision(v0, f0, 1)
v1, f1 = load_mesh(args.body_input, device)
vs1 = torch.tensor(np.load(args.body_input)['verts_semantics'], device=v.device)
v1, f1 = segment_mesh(v1, f1, vs1, i1, smoothing='edge', dilate=3)
v1, f1 = loop_subdivision(v1, f1, 2)
v0, v1 = bidirectional_icp_fitting(v0, f0, v1, f1)
level_set = 0.334
v2, f2 = torch.cat([v0, v1]), torch.cat([f0, f1+len(v0)])
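    # fuse the fitted body and cloth-region meshes, then remesh their union with a winding-number distance field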
# v, f = v2, f2
v, f = hierarchical_winding_distance_remesh(v2, f2, level_set=level_set)
# 0.334 will produce ripple effects on perfectly normal mesh if thresh of winding number is too low
# (1 - th) / 2 + (level_set - 0.5).abs() > 0.5 - maximum_error
# 0.5 - th / 2 + 0.5 - level_set - 0.5 > - maximum_error
# th / 2 + level_set - 0.5 > maximum_error
# 0.225 + 0.334 - 0.5 = 0.059 > maximum_error
# conditional laplacian smoothing
th = 0.45 # 1-max_error
wn = winding_number_nooom(v, v2, f2) # TODO: use grid sample from previous computation to make this faster
vm = (wn - level_set).abs() < (th / 2)
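    # mark vertices whose winding number lies close to the extraction level set; only these are smoothed below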
# compute edges | e, i, c = get_edges(f) | 4 | 2023-12-07 08:53:42+00:00 | 8k |
minghanqin/LangSplat | scene/dataset_readers.py | [
{
"identifier": "read_extrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras"
},
{
"identifier": "qvec2rotmat",
"path": "scene/colmap_loader.py",
"snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])"
},
{
"identifier": "read_extrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras"
},
{
"identifier": "read_points3D_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors"
},
{
"identifier": "read_points3D_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors"
},
{
"identifier": "getWorld2View2",
"path": "utils/graphics_utils.py",
"snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)"
},
{
"identifier": "focal2fov",
"path": "utils/graphics_utils.py",
"snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))"
},
{
"identifier": "fov2focal",
"path": "utils/graphics_utils.py",
"snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))"
},
{
"identifier": "SH2RGB",
"path": "utils/sh_utils.py",
"snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5"
},
{
"identifier": "BasicPointCloud",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def __init__(self, sh_degree : int):\n def capture(self, include_feature=False):\n def restore(self, model_args, training_args, mode='train'):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_language_feature(self):\n def get_covariance(self, scaling_modifier = 1):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)"
}
] | import os
import sys
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud | 4,325 | class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="SIMPLE_RADIAL":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="SIMPLE_RADIAL":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) | cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) | 1 | 2023-12-11 06:33:35+00:00 | 8k |
alibaba/animate-anything | app_svd.py | [
{
"identifier": "tensor_to_vae_latent",
"path": "utils/common.py",
"snippet": "def tensor_to_vae_latent(t, vae):\n video_length = t.shape[1]\n\n t = rearrange(t, \"b f c h w -> (b f) c h w\")\n latents = vae.encode(t).latent_dist.sample()\n latents = rearrange(latents, \"(b f) c h w -> b c f h w\", f=video_length)\n latents = latents * 0.18215\n\n return latents"
},
{
"identifier": "DDPM_forward_timesteps",
"path": "utils/common.py",
"snippet": "def DDPM_forward_timesteps(x0, step, num_frames, scheduler):\n '''larger step -> smaller t -> smaller alphas[t:] -> smaller xt -> smaller x0'''\n\n device = x0.device\n # timesteps are reversed\n timesteps = scheduler.timesteps[len(scheduler.timesteps)-step:]\n t = timesteps[0]\n\n if x0.shape[2] == 1:\n xt = repeat(x0, 'b c 1 h w -> b c f h w', f = num_frames)\n else:\n xt = x0\n noise = torch.randn(xt.shape, dtype=xt.dtype, device=device)\n # t to tensor of batch size \n t = torch.tensor([t]*xt.shape[0], device=device)\n xt = scheduler.add_noise(xt, noise, t)\n return xt, timesteps"
},
{
"identifier": "MaskStableVideoDiffusionPipeline",
"path": "models/pipeline.py",
"snippet": "class MaskStableVideoDiffusionPipeline(StableVideoDiffusionPipeline):\n @torch.no_grad()\n def __call__(\n self,\n image,\n height: int = 576,\n width: int = 1024,\n num_frames: Optional[int] = None,\n num_inference_steps: int = 25,\n min_guidance_scale: float = 1.0,\n max_guidance_scale: float = 3.0,\n fps: int = 7,\n motion_bucket_id: int = 127,\n noise_aug_strength: int = 0.02,\n decode_chunk_size: Optional[int] = None,\n num_videos_per_prompt: Optional[int] = 1,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,\n callback_on_step_end_tensor_inputs: List[str] = [\"latents\"],\n return_dict: bool = True,\n mask = None,\n ):\n r\"\"\"\n The call function to the pipeline for generation.\n\n Args:\n image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):\n Image or images to guide image generation. If you provide a tensor, it needs to be compatible with\n [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json).\n height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The width in pixels of the generated image.\n num_frames (`int`, *optional*):\n The number of video frames to generate. Defaults to 14 for `stable-video-diffusion-img2vid` and to 25 for `stable-video-diffusion-img2vid-xt`\n num_inference_steps (`int`, *optional*, defaults to 25):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference. This parameter is modulated by `strength`.\n min_guidance_scale (`float`, *optional*, defaults to 1.0):\n The minimum guidance scale. Used for the classifier free guidance with first frame.\n max_guidance_scale (`float`, *optional*, defaults to 3.0):\n The maximum guidance scale. Used for the classifier free guidance with last frame.\n fps (`int`, *optional*, defaults to 7):\n Frames per second. The rate at which the generated images shall be exported to a video after generation.\n Note that Stable Diffusion Video's UNet was micro-conditioned on fps-1 during training.\n motion_bucket_id (`int`, *optional*, defaults to 127):\n The motion bucket ID. Used as conditioning for the generation. The higher the number the more motion will be in the video.\n noise_aug_strength (`int`, *optional*, defaults to 0.02):\n The amount of noise added to the init image, the higher it is the less the video will look like the init image. Increase it for more motion.\n decode_chunk_size (`int`, *optional*):\n The number of frames to decode at a time. The higher the chunk size, the higher the temporal consistency\n between frames, but also the higher the memory consumption. By default, the decoder will decode all frames at once\n for maximal quality. 
Reduce `decode_chunk_size` to reduce memory usage.\n num_videos_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor is generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generated image. Choose between `PIL.Image` or `np.array`.\n callback_on_step_end (`Callable`, *optional*):\n A function that calls at the end of each denoising steps during the inference. The function is called\n with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,\n callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by\n `callback_on_step_end_tensor_inputs`.\n callback_on_step_end_tensor_inputs (`List`, *optional*):\n The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list\n will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the\n `._callback_tensor_inputs` attribute of your pipeline class.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n\n Returns:\n [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] or `tuple`:\n If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] is returned,\n otherwise a `tuple` is returned where the first element is a list of list with the generated frames.\n\n Examples:\n\n ```py\n from diffusers import StableVideoDiffusionPipeline\n from diffusers.utils import load_image, export_to_video\n\n pipe = StableVideoDiffusionPipeline.from_pretrained(\"stabilityai/stable-video-diffusion-img2vid-xt\", torch_dtype=torch.float16, variant=\"fp16\")\n pipe.to(\"cuda\")\n\n image = load_image(\"https://lh3.googleusercontent.com/y-iFOHfLTwkuQSUegpwDdgKmOjRSTvPxat63dQLB25xkTs4lhIbRUFeNBWZzYf370g=s1200\")\n image = image.resize((1024, 576))\n\n frames = pipe(image, num_frames=25, decode_chunk_size=8).frames[0]\n export_to_video(frames, \"generated.mp4\", fps=7)\n ```\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_frames = num_frames if num_frames is not None else self.unet.config.num_frames\n decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else num_frames\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(image, height, width)\n\n # 2. Define call parameters\n if isinstance(image, PIL.Image.Image):\n batch_size = 1\n elif isinstance(image, list):\n batch_size = len(image)\n else:\n batch_size = image.shape[0]\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = max_guidance_scale > 1.0\n\n # 3. Encode input image\n image_embeddings = self._encode_image(image, device, num_videos_per_prompt, do_classifier_free_guidance)\n\n # NOTE: Stable Diffusion Video was conditioned on fps - 1, which\n # is why it is reduced here.\n # See: https://github.com/Stability-AI/generative-models/blob/ed0997173f98eaf8f4edf7ba5fe8f15c6b877fd3/scripts/sampling/simple_video_sample.py#L188\n fps = fps - 1\n\n # 4. Encode input image using VAE\n image = self.image_processor.preprocess(image, height=height, width=width)\n noise = randn_tensor(image.shape, generator=generator, device=image.device, dtype=image.dtype)\n image = image + noise_aug_strength * noise\n\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\n if needs_upcasting:\n self.vae.to(dtype=torch.float32)\n\n image_latents = self._encode_vae_image(image, device, num_videos_per_prompt, do_classifier_free_guidance)\n image_latents = image_latents.to(image_embeddings.dtype)\n\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n\n # Repeat the image latents for each frame so we can concatenate them with the noise\n # image_latents [batch, channels, height, width] ->[batch, num_frames, channels, height, width]\n image_latents = image_latents.unsqueeze(1).repeat(1, num_frames, 1, 1, 1)\n mask = repeat(mask, '1 h w -> 2 f 1 h w', f=num_frames)\n # 5. Get Added Time IDs\n added_time_ids = self._get_add_time_ids(\n fps,\n motion_bucket_id,\n noise_aug_strength,\n image_embeddings.dtype,\n batch_size,\n num_videos_per_prompt,\n do_classifier_free_guidance,\n )\n added_time_ids = added_time_ids.to(device)\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_frames,\n num_channels_latents,\n height,\n width,\n image_embeddings.dtype,\n device,\n generator,\n latents,\n )\n\n # 7. Prepare guidance scale\n guidance_scale = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames).unsqueeze(0)\n guidance_scale = guidance_scale.to(device, latents.dtype)\n guidance_scale = guidance_scale.repeat(batch_size * num_videos_per_prompt, 1)\n guidance_scale = _append_dims(guidance_scale, latents.ndim)\n\n self._guidance_scale = guidance_scale\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n self._num_timesteps = len(timesteps)\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # Concatenate image_latents over channels dimention\n latent_model_input = torch.cat([mask, latent_model_input, image_latents], dim=2)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=image_embeddings,\n added_time_ids=added_time_ids,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents).prev_sample\n\n if callback_on_step_end is not None:\n callback_kwargs = {}\n for k in callback_on_step_end_tensor_inputs:\n callback_kwargs[k] = locals()[k]\n callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)\n\n latents = callback_outputs.pop(\"latents\", latents)\n\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n\n if not output_type == \"latent\":\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n frames = self.decode_latents(latents, num_frames, decode_chunk_size)\n frames = svd_tensor2vid(frames, self.image_processor, output_type=output_type)\n else:\n frames = latents\n\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return frames\n\n return StableVideoDiffusionPipelineOutput(frames=frames)"
}
] | import os
import random
import math
import gradio as gr
import torch
import torchvision.transforms as T
import imageio
from argparse import ArgumentParser
from diffusers.image_processor import VaeImageProcessor
from omegaconf import OmegaConf
from PIL import Image
from diffusers import StableVideoDiffusionPipeline
from utils.common import tensor_to_vae_latent, DDPM_forward_timesteps
from models.pipeline import MaskStableVideoDiffusionPipeline | 4,270 |
css = """
.toolbutton {
margin-buttom: 0em 0em 0em 0em;
max-width: 2.5em;
min-width: 2.5em !important;
height: 2.5em;
}
"""
class AnimateController:
def __init__(self, pretrained_model_path: str, validation_data,
output_dir, motion_mask = False, motion_strength = False):
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
device=torch.device("cuda")
self.validation_data = validation_data
self.output_dir = output_dir
# self.pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path,
# torch_dtype=torch.float16, variant="fp16").to(device)
self.pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path).to(device)
self.sample_idx = 0
def animate(
self,
init_img,
sample_step_slider,
seed_textbox,
fps_textbox,
num_frames_textbox,
motion_bucket_id_slider,
progress=gr.Progress(),
):
if seed_textbox != -1 and seed_textbox != "":
torch.manual_seed(int(seed_textbox))
else:
torch.seed()
seed = torch.initial_seed()
vae = self.pipeline.vae
diffusion_scheduler = self.pipeline.scheduler
validation_data = self.validation_data
validation_data.fps = int(fps_textbox)
validation_data.num_frames = int(num_frames_textbox)
validation_data.motion_bucket_id = int(motion_bucket_id_slider)
vae_processor = VaeImageProcessor()
device = vae.device
dtype = vae.dtype
pimg = Image.fromarray(init_img["background"]).convert('RGB')
width, height = pimg.size
scale = math.sqrt(width*height / (validation_data.height*validation_data.width))
block_size=64
height = round(height/scale/block_size)*block_size
width = round(width/scale/block_size)*block_size
input_image = vae_processor.preprocess(pimg, height, width)
input_image = input_image.unsqueeze(0).to(dtype).to(device)
input_image_latents = tensor_to_vae_latent(input_image, vae)
np_mask = init_img["layers"][0][:,:,3]
np_mask[np_mask!=0] = 255
if np_mask.sum() == 0:
np_mask[:] = 255
b, c, _, h, w = input_image_latents.shape
initial_latents, timesteps = DDPM_forward_timesteps(input_image_latents,
sample_step_slider, validation_data.num_frames, diffusion_scheduler)
mask = T.ToTensor()(np_mask).to(dtype).to(device)
b, c, f, h, w = initial_latents.shape
mask = T.Resize([h, w], antialias=False)(mask)
motion_mask = self.pipeline.unet.config.in_channels == 9
with torch.no_grad():
if motion_mask:
|
css = """
.toolbutton {
margin-buttom: 0em 0em 0em 0em;
max-width: 2.5em;
min-width: 2.5em !important;
height: 2.5em;
}
"""
class AnimateController:
def __init__(self, pretrained_model_path: str, validation_data,
output_dir, motion_mask = False, motion_strength = False):
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
device=torch.device("cuda")
self.validation_data = validation_data
self.output_dir = output_dir
# self.pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path,
# torch_dtype=torch.float16, variant="fp16").to(device)
self.pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path).to(device)
self.sample_idx = 0
def animate(
self,
init_img,
sample_step_slider,
seed_textbox,
fps_textbox,
num_frames_textbox,
motion_bucket_id_slider,
progress=gr.Progress(),
):
if seed_textbox != -1 and seed_textbox != "":
torch.manual_seed(int(seed_textbox))
else:
torch.seed()
seed = torch.initial_seed()
vae = self.pipeline.vae
diffusion_scheduler = self.pipeline.scheduler
validation_data = self.validation_data
validation_data.fps = int(fps_textbox)
validation_data.num_frames = int(num_frames_textbox)
validation_data.motion_bucket_id = int(motion_bucket_id_slider)
vae_processor = VaeImageProcessor()
device = vae.device
dtype = vae.dtype
pimg = Image.fromarray(init_img["background"]).convert('RGB')
width, height = pimg.size
scale = math.sqrt(width*height / (validation_data.height*validation_data.width))
block_size=64
height = round(height/scale/block_size)*block_size
width = round(width/scale/block_size)*block_size
input_image = vae_processor.preprocess(pimg, height, width)
input_image = input_image.unsqueeze(0).to(dtype).to(device)
input_image_latents = tensor_to_vae_latent(input_image, vae)
np_mask = init_img["layers"][0][:,:,3]
np_mask[np_mask!=0] = 255
if np_mask.sum() == 0:
np_mask[:] = 255
b, c, _, h, w = input_image_latents.shape
initial_latents, timesteps = DDPM_forward_timesteps(input_image_latents,
sample_step_slider, validation_data.num_frames, diffusion_scheduler)
mask = T.ToTensor()(np_mask).to(dtype).to(device)
b, c, f, h, w = initial_latents.shape
mask = T.Resize([h, w], antialias=False)(mask)
motion_mask = self.pipeline.unet.config.in_channels == 9
with torch.no_grad():
if motion_mask: | video_frames = MaskStableVideoDiffusionPipeline.__call__( | 2 | 2023-12-07 08:26:29+00:00 | 8k |
yohanshin/WHAM | lib/models/wham.py | [
{
"identifier": "constants",
"path": "configs/constants.py",
"snippet": "IMG_FEAT_DIM = {\n 'resnet': 2048,\n 'vit': 1024\n}\nN_JOINTS = 17\n PARSED_DATA = f'{root}/parsed_data'\n THREEDPW_PTH = f'{root}/3DPW'\n RICH_PTH = f'{root}/RICH'\n EMDB_PTH = f'{root}/EMDB'\n NUM_JOINTS = N_JOINTS\n H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\n H36M_TO_J14 = H36M_TO_J17[:14]\n J17_TO_H36M = [14, 3, 4, 5, 2, 1, 0, 15, 12, 16, 13, 9, 10, 11, 8, 7, 6]\n COCO_AUG_DICT = f'{root}/body_models/coco_aug_dict.pth'\n TREE = [[5, 6], 0, 0, 1, 2, -1, -1, 5, 6, 7, 8, -1, -1, 11, 12, 13, 14, 15, 15, 15, 16, 16, 16]\n S_BIAS = 1e-1\n S_JITTERING = 5e-2\n S_PEAK = 3e-1\n S_PEAK_MASK = 5e-3\n S_MASK = 0.03\n MAIN_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21] # reduced_joints\n FLDR = f'{root}/body_models/smpl/'\n SMPLX2SMPL = f'{root}/body_models/smplx2smpl.pkl'\n FACES = f'{root}/body_models/smpl_faces.npy'\n MEAN_PARAMS = f'{root}/body_models/smpl_mean_params.npz'\n JOINTS_REGRESSOR_WHAM = f'{root}/body_models/J_regressor_wham.npy'\n JOINTS_REGRESSOR_H36M = f'{root}/body_models/J_regressor_h36m.npy'\n JOINTS_REGRESSOR_EXTRA = f'{root}/body_models/J_regressor_extra.npy'\n JOINTS_REGRESSOR_FEET = f'{root}/body_models/J_regressor_feet.npy'\n PARENTS = torch.tensor([\n -1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21])\nclass PATHS:\nclass KEYPOINTS:\nclass BMODEL:"
},
{
"identifier": "transforms",
"path": "lib/utils/transforms.py",
"snippet": "def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:\ndef _copysign(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\ndef _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:\ndef matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:\ndef _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:\ndef euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:\ndef _angle_from_tan(\n axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool\n) -> torch.Tensor:\ndef _index_from_letter(letter: str) -> int:\ndef matrix_to_euler_angles(matrix: torch.Tensor, convention: str) -> torch.Tensor:\ndef random_quaternions(\n n: int, dtype: Optional[torch.dtype] = None, device: Optional[Device] = None\n) -> torch.Tensor:\ndef random_rotations(\n n: int, dtype: Optional[torch.dtype] = None, device: Optional[Device] = None\n) -> torch.Tensor:\ndef random_rotation(\n dtype: Optional[torch.dtype] = None, device: Optional[Device] = None\n) -> torch.Tensor:\ndef standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:\ndef quaternion_raw_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\ndef quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\ndef quaternion_invert(quaternion: torch.Tensor) -> torch.Tensor:\ndef quaternion_apply(quaternion: torch.Tensor, point: torch.Tensor) -> torch.Tensor:\ndef axis_angle_to_matrix(axis_angle: torch.Tensor) -> torch.Tensor:\ndef matrix_to_axis_angle(matrix: torch.Tensor) -> torch.Tensor:\ndef axis_angle_to_quaternion(axis_angle: torch.Tensor) -> torch.Tensor:\ndef quaternion_to_axis_angle(quaternions: torch.Tensor) -> torch.Tensor:\ndef rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:\ndef matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:\ndef clean_rotation_6d(d6d: torch.Tensor) -> torch.Tensor:\ndef rot6d_to_rotmat(x):\ndef rotmat_to_rot6d(x):\ndef convert_rotation_matrix_to_homogeneous(rotation_matrix):\ndef rotation_matrix_to_angle_axis(rotation_matrix):\ndef rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):\ndef quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:"
},
{
"identifier": "MotionEncoder",
"path": "lib/models/layers/modules.py",
"snippet": "class MotionEncoder(nn.Module):\n def __init__(self, \n in_dim, \n d_embed,\n pose_dr,\n rnn_type,\n n_layers,\n n_joints):\n super().__init__()\n \n self.n_joints = n_joints\n \n self.embed_layer = nn.Linear(in_dim, d_embed)\n self.pos_drop = nn.Dropout(pose_dr)\n \n # Keypoints initializer\n self.neural_init = NeuralInitialization(n_joints * 3 + in_dim, d_embed, rnn_type, n_layers)\n \n # 3d keypoints regressor\n self.regressor = Regressor(\n d_embed, d_embed, [n_joints * 3], n_joints * 3, rnn_type, n_layers)\n \n def forward(self, x, init):\n \"\"\" Forward pass of motion encoder.\n \"\"\"\n \n self.b, self.f = x.shape[:2]\n x = self.embed_layer(x.reshape(self.b, self.f, -1))\n x = self.pos_drop(x)\n \n h0 = self.neural_init(init)\n pred_list = [init[..., :self.n_joints * 3]]\n motion_context_list = []\n \n for i in range(self.f):\n (pred_kp3d, ), motion_context, h0 = self.regressor(x[:, [i]], pred_list[-1:], h0)\n motion_context_list.append(motion_context)\n pred_list.append(pred_kp3d)\n \n pred_kp3d = torch.cat(pred_list[1:], dim=1).view(self.b, self.f, -1, 3)\n motion_context = torch.cat(motion_context_list, dim=1)\n \n # Merge 3D keypoints with motion context\n motion_context = torch.cat((motion_context, pred_kp3d.reshape(self.b, self.f, -1)), dim=-1)\n return pred_kp3d, motion_context"
},
{
"identifier": "MotionDecoder",
"path": "lib/models/layers/modules.py",
"snippet": "class MotionDecoder(nn.Module):\n def __init__(self, \n d_embed,\n rnn_type,\n n_layers):\n super().__init__()\n \n self.n_pose = 24\n \n # SMPL pose initialization\n self.neural_init = NeuralInitialization(len(_C.BMODEL.MAIN_JOINTS) * 6, d_embed, rnn_type, n_layers)\n \n # 3d keypoints regressor\n self.regressor = Regressor(\n d_embed, d_embed, [self.n_pose * 6, 10, 3, 4], self.n_pose * 6, rnn_type, n_layers)\n \n def forward(self, x, init):\n \"\"\" Forward pass of motion decoder.\n \"\"\"\n b, f = x.shape[:2]\n \n h0 = self.neural_init(init[:, :, _C.BMODEL.MAIN_JOINTS].reshape(b, 1, -1))\n \n # Recursive prediction of SMPL parameters\n pred_pose_list = [init.reshape(b, 1, -1)]\n pred_shape_list, pred_cam_list, pred_contact_list = [], [], []\n \n for i in range(f):\n # Camera coordinate estimation\n (pred_pose, pred_shape, pred_cam, pred_contact), _, h0 = self.regressor(x[:, [i]], pred_pose_list[-1:], h0)\n pred_pose_list.append(pred_pose)\n pred_shape_list.append(pred_shape)\n pred_cam_list.append(pred_cam)\n pred_contact_list.append(pred_contact)\n \n pred_pose = torch.cat(pred_pose_list[1:], dim=1).view(b, f, -1)\n pred_shape = torch.cat(pred_shape_list, dim=1).view(b, f, -1)\n pred_cam = torch.cat(pred_cam_list, dim=1).view(b, f, -1)\n pred_contact = torch.cat(pred_contact_list, dim=1).view(b, f, -1)\n \n return pred_pose, pred_shape, pred_cam, pred_contact"
},
{
"identifier": "TrajectoryDecoder",
"path": "lib/models/layers/modules.py",
"snippet": "class TrajectoryDecoder(nn.Module):\n def __init__(self, \n d_embed,\n rnn_type,\n n_layers):\n super().__init__()\n \n # Trajectory regressor\n self.regressor = Regressor(\n d_embed, d_embed, [3, 6], 12, rnn_type, n_layers, )\n \n def forward(self, x, root, cam_a, h0=None):\n \"\"\" Forward pass of trajectory decoder.\n \"\"\"\n \n b, f = x.shape[:2]\n pred_root_list, pred_vel_list = [root[:, :1]], []\n \n for i in range(f):\n # Global coordinate estimation\n (pred_rootv, pred_rootr), _, h0 = self.regressor(\n x[:, [i]], [pred_root_list[-1], cam_a[:, [i]]], h0)\n \n pred_root_list.append(pred_rootr)\n pred_vel_list.append(pred_rootv)\n \n pred_root = torch.cat(pred_root_list, dim=1).view(b, f + 1, -1)\n pred_vel = torch.cat(pred_vel_list, dim=1).view(b, f, -1)\n \n return pred_root, pred_vel"
},
{
"identifier": "TrajectoryRefiner",
"path": "lib/models/layers/modules.py",
"snippet": "class TrajectoryRefiner(nn.Module):\n def __init__(self, \n d_embed,\n d_hidden, \n rnn_type,\n n_layers):\n super().__init__()\n \n d_input = d_embed + 12\n self.refiner = Regressor(\n d_input, d_hidden, [6, 3], 9, rnn_type, n_layers)\n\n def forward(self, context, pred_vel, output, cam_angvel, return_y_up):\n b, f = context.shape[:2]\n \n # Register values\n pred_pose = output['pose'].clone().detach()\n pred_root = output['poses_root_r6d'].clone().detach()\n feet = output['feet'].clone().detach()\n contact = output['contact'].clone().detach()\n \n feet_vel = torch.cat((torch.zeros_like(feet[:, :1]), feet[:, 1:] - feet[:, :-1]), dim=1) * 30 # Normalize to 30 times\n feet = (feet_vel * contact.unsqueeze(-1)).reshape(b, f, -1) # Velocity input\n inpt_feat = torch.cat([context, feet], dim=-1)\n \n (delta_root, delta_vel), _, _ = self.refiner(inpt_feat, [pred_root[:, 1:], pred_vel], h0=None)\n pred_root[:, 1:] = pred_root[:, 1:] + delta_root\n pred_vel = pred_vel + delta_vel\n\n root_world, trans_world = rollout_global_motion(pred_root, pred_vel)\n \n if return_y_up:\n yup2ydown = axis_angle_to_matrix(torch.tensor([[np.pi, 0, 0]])).float().to(root_world.device)\n root_world = yup2ydown.mT @ root_world\n trans_world = (yup2ydown.mT @ trans_world.unsqueeze(-1)).squeeze(-1)\n \n output.update({\n 'poses_root_r6d_refined': pred_root,\n 'vel_root_refined': pred_vel,\n 'poses_root_world': root_world,\n 'trans_world': trans_world,\n })\n \n return output"
},
{
"identifier": "Integrator",
"path": "lib/models/layers/modules.py",
"snippet": "class Integrator(nn.Module):\n def __init__(self, in_channel, out_channel, hid_channel=1024):\n super().__init__()\n \n self.layer1 = nn.Linear(in_channel, hid_channel)\n self.relu1 = nn.ReLU()\n self.dr1 = nn.Dropout(0.1)\n \n self.layer2 = nn.Linear(hid_channel, hid_channel)\n self.relu2 = nn.ReLU()\n self.dr2 = nn.Dropout(0.1)\n \n self.layer3 = nn.Linear(hid_channel, out_channel)\n \n \n def forward(self, x, feat):\n res = x\n mask = (feat != 0).all(dim=-1).all(dim=-1)\n \n out = torch.cat((x, feat), dim=-1)\n out = self.layer1(out)\n out = self.relu1(out)\n out = self.dr1(out)\n \n out = self.layer2(out)\n out = self.relu2(out)\n out = self.dr2(out)\n \n out = self.layer3(out)\n out[mask] = out[mask] + res[mask]\n \n return out"
},
{
"identifier": "rollout_global_motion",
"path": "lib/models/layers/utils.py",
"snippet": "def rollout_global_motion(root_r, root_v, init_trans=None):\n b, f = root_v.shape[:2]\n root = transforms.rotation_6d_to_matrix(root_r[:])\n vel_world = (root[:, :-1] @ root_v.unsqueeze(-1)).squeeze(-1)\n trans = torch.cumsum(vel_world, dim=1)\n \n if init_trans is not None: trans = trans + init_trans\n return root[:, 1:], trans"
},
{
"identifier": "compute_camera_pose",
"path": "lib/models/layers/utils.py",
"snippet": "def compute_camera_pose(root_c_d6d, root_w):\n root_c = transforms.rotation_6d_to_matrix(root_c_d6d) # Root orient in cam coord\n cam_R = root_c @ root_w.mT\n return cam_R"
},
{
"identifier": "reset_root_velocity",
"path": "lib/models/layers/utils.py",
"snippet": "def reset_root_velocity(smpl, output, stationary, pred_ori, pred_vel, thr=0.7):\n b, f = pred_vel.shape[:2]\n \n stationary_mask = (stationary.clone().detach() > thr).unsqueeze(-1).float()\n poses_root = transforms.rotation_6d_to_matrix(pred_ori.clone().detach())\n vel_world = (poses_root[:, 1:] @ pred_vel.clone().detach().unsqueeze(-1)).squeeze(-1)\n \n output = smpl.get_output(body_pose=output.body_pose.clone().detach(),\n global_orient=poses_root[:, 1:].reshape(-1, 1, 3, 3),\n betas=output.betas.clone().detach(),\n pose2rot=False)\n feet = output.feet.reshape(b, f, 4, 3)\n feet_vel = feet[:, 1:] - feet[:, :-1] + vel_world[:, 1:].unsqueeze(-2)\n feet_vel = torch.cat((torch.zeros_like(feet_vel[:, :1]), feet_vel), dim=1)\n \n stationary_vel = feet_vel * stationary_mask\n del_vel = stationary_vel.sum(dim=2) / ((stationary_vel != 0).sum(dim=2) + 1e-4)\n vel_world_update = vel_world - del_vel\n \n vel_root = (poses_root[:, 1:].mT @ vel_world_update.unsqueeze(-1)).squeeze(-1)\n \n return vel_root"
},
{
"identifier": "compute_camera_motion",
"path": "lib/models/layers/utils.py",
"snippet": "def compute_camera_motion(output, root_c_d6d, root_w, trans, pred_cam):\n root_c = transforms.rotation_6d_to_matrix(root_c_d6d) # Root orient in cam coord\n cam_R = root_c @ root_w.mT\n pelvis_cam = output.full_cam.view_as(pred_cam)\n pelvis_world = (cam_R.mT @ pelvis_cam.unsqueeze(-1)).squeeze(-1)\n cam_T_world = pelvis_world - trans\n cam_T = (cam_R @ cam_T_world.unsqueeze(-1)).squeeze(-1)\n \n return cam_R, cam_T"
}
] | import torch
from torch import nn
from configs import constants as _C
from lib.utils import transforms
from lib.models.layers import (MotionEncoder, MotionDecoder, TrajectoryDecoder, TrajectoryRefiner, Integrator,
rollout_global_motion, compute_camera_pose, reset_root_velocity, compute_camera_motion) | 4,909 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class Network(nn.Module):
def __init__(self,
smpl,
pose_dr=0.1,
d_embed=512,
n_layers=3,
d_feat=2048,
rnn_type='LSTM',
**kwargs
):
super().__init__()
n_joints = _C.KEYPOINTS.NUM_JOINTS
self.smpl = smpl
in_dim = n_joints * 2 + 3
d_context = d_embed + n_joints * 3
self.mask_embedding = nn.Parameter(torch.zeros(1, 1, n_joints, 2))
# Module 1. Motion Encoder
self.motion_encoder = MotionEncoder(in_dim=in_dim,
d_embed=d_embed,
pose_dr=pose_dr,
rnn_type=rnn_type,
n_layers=n_layers,
n_joints=n_joints)
self.trajectory_decoder = TrajectoryDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 3. Feature Integrator
self.integrator = Integrator(in_channel=d_feat + d_context,
out_channel=d_context)
# Module 4. Motion Decoder
self.motion_decoder = MotionDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 5. Trajectory Refiner
self.trajectory_refiner = TrajectoryRefiner(d_embed=d_context,
d_hidden=d_embed,
rnn_type=rnn_type,
n_layers=2)
@property
def __version__(self, ):
return 'v07'
def compute_global_feet(self, duplicate=False):
# Global motion
        init_trans = None  # if self.training else self.output.full_cam.reshape(self.b, self.f, 3)[:, [0]]
root_world, trans = rollout_global_motion(self.pred_root, self.pred_vel, init_trans)
# # Compute world-coordinate motion
# if not duplicate:
# self.global_output = self.smpl.get_output(
# global_orient=root_world.reshape(self.b * self.f, 1, 3, 3), body_pose=self.output.body_pose,
# betas=self.output.betas, pose2rot=False
# )
# feet_world = self.global_output.feet.reshape(self.b, self.f, 4, 3) + trans.unsqueeze(-2)
cam_R, cam_T = compute_camera_motion(self.output, self.pred_pose[:, :, :6], root_world, trans, self.pred_cam)
feet_cam = self.output.feet.reshape(self.b, self.f, -1, 3) + self.output.full_cam.reshape(self.b, self.f, 1, 3)
feet_world = (cam_R.mT @ (feet_cam - cam_T.unsqueeze(-2)).mT).mT
return feet_world
def forward_smpl(self, **kwargs):
self.output = self.smpl(self.pred_pose,
self.pred_shape,
cam=self.pred_cam,
return_full_pose=not self.training,
**kwargs,
)
kp3d = self.output.joints
# Feet location in global coordinate
feet_world = self.compute_global_feet()
# Return output
output = {'feet': feet_world,
'contact': self.pred_contact,
'pose': self.pred_pose,
'betas': self.pred_shape,
'poses_root_cam': self.output.global_orient,
'verts_cam': self.output.vertices}
if self.training:
pass # TODO: Update training code
else:
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class Network(nn.Module):
def __init__(self,
smpl,
pose_dr=0.1,
d_embed=512,
n_layers=3,
d_feat=2048,
rnn_type='LSTM',
**kwargs
):
super().__init__()
n_joints = _C.KEYPOINTS.NUM_JOINTS
self.smpl = smpl
in_dim = n_joints * 2 + 3
d_context = d_embed + n_joints * 3
self.mask_embedding = nn.Parameter(torch.zeros(1, 1, n_joints, 2))
# Module 1. Motion Encoder
self.motion_encoder = MotionEncoder(in_dim=in_dim,
d_embed=d_embed,
pose_dr=pose_dr,
rnn_type=rnn_type,
n_layers=n_layers,
n_joints=n_joints)
self.trajectory_decoder = TrajectoryDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 3. Feature Integrator
self.integrator = Integrator(in_channel=d_feat + d_context,
out_channel=d_context)
# Module 4. Motion Decoder
self.motion_decoder = MotionDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 5. Trajectory Refiner
self.trajectory_refiner = TrajectoryRefiner(d_embed=d_context,
d_hidden=d_embed,
rnn_type=rnn_type,
n_layers=2)
@property
def __version__(self, ):
return 'v07'
def compute_global_feet(self, duplicate=False):
# Global motion
        init_trans = None  # if self.training else self.output.full_cam.reshape(self.b, self.f, 3)[:, [0]]
root_world, trans = rollout_global_motion(self.pred_root, self.pred_vel, init_trans)
# # Compute world-coordinate motion
# if not duplicate:
# self.global_output = self.smpl.get_output(
# global_orient=root_world.reshape(self.b * self.f, 1, 3, 3), body_pose=self.output.body_pose,
# betas=self.output.betas, pose2rot=False
# )
# feet_world = self.global_output.feet.reshape(self.b, self.f, 4, 3) + trans.unsqueeze(-2)
cam_R, cam_T = compute_camera_motion(self.output, self.pred_pose[:, :, :6], root_world, trans, self.pred_cam)
feet_cam = self.output.feet.reshape(self.b, self.f, -1, 3) + self.output.full_cam.reshape(self.b, self.f, 1, 3)
feet_world = (cam_R.mT @ (feet_cam - cam_T.unsqueeze(-2)).mT).mT
return feet_world
def forward_smpl(self, **kwargs):
self.output = self.smpl(self.pred_pose,
self.pred_shape,
cam=self.pred_cam,
return_full_pose=not self.training,
**kwargs,
)
kp3d = self.output.joints
# Feet location in global coordinate
feet_world = self.compute_global_feet()
# Return output
output = {'feet': feet_world,
'contact': self.pred_contact,
'pose': self.pred_pose,
'betas': self.pred_shape,
'poses_root_cam': self.output.global_orient,
'verts_cam': self.output.vertices}
if self.training:
pass # TODO: Update training code
else: | pose = transforms.matrix_to_axis_angle(self.output.full_pose).reshape(-1, 72) | 1 | 2023-12-08 09:17:54+00:00 | 8k |
octo-models/octo | octo/data/dataset.py | [
{
"identifier": "obs_transforms",
"path": "octo/data/obs_transforms.py",
"snippet": "def augment(\n obs: dict, seed: tf.Tensor, augment_kwargs: Union[dict, Mapping[str, dict]]\n) -> dict:\ndef decode_and_resize(\n obs: dict,\n resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]],\n depth_resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]],\n) -> dict:"
},
{
"identifier": "traj_transforms",
"path": "octo/data/traj_transforms.py",
"snippet": "def chunk_act_obs(\n traj: dict,\n window_size: int,\n future_action_window_size: int = 0,\n) -> dict:\ndef subsample(traj: dict, subsample_length: int) -> dict:\ndef add_pad_mask_dict(traj: dict) -> dict:"
},
{
"identifier": "goal_relabeling",
"path": "octo/data/utils/goal_relabeling.py",
"snippet": "def uniform(traj: dict) -> dict:"
},
{
"identifier": "task_augmentation",
"path": "octo/data/utils/task_augmentation.py",
"snippet": "def delete_task_conditioning(\n traj: dict,\n keep_image_prob: float,\n):"
},
{
"identifier": "allocate_threads",
"path": "octo/data/utils/data_utils.py",
"snippet": "def allocate_threads(n: Optional[int], weights: np.ndarray):\n \"\"\"Allocates an integer number of threads across datasets based on weights. The final array sums to `n`,\n but each element is no less than 1. If `n` is None, then every dataset is assigned a value of AUTOTUNE.\n \"\"\"\n if n is None:\n return np.array([tf.data.AUTOTUNE] * len(weights))\n\n assert np.all(weights >= 0), \"Weights must be non-negative\"\n assert (\n len(weights) <= n\n ), \"Number of threads must be at least as large as length of weights\"\n weights = np.array(weights) / np.sum(weights)\n\n allocation = np.zeros_like(weights, dtype=int)\n while True:\n # give the remaining elements that would get less than 1 a 1\n mask = (weights * n < 1) & (weights > 0)\n if not mask.any():\n break\n n -= mask.sum()\n allocation += mask.astype(int)\n # recompute the distribution over the remaining elements\n weights[mask] = 0\n weights = weights / weights.sum()\n # allocate the remaining elements\n fractional, integral = np.modf(weights * n)\n allocation += integral.astype(int)\n n -= integral.sum()\n for i in np.argsort(fractional)[::-1][: int(n)]:\n allocation[i] += 1\n return allocation"
},
{
"identifier": "get_dataset_statistics",
"path": "octo/data/utils/data_utils.py",
"snippet": "def get_dataset_statistics(\n dataset: dl.DLataset,\n hash_dependencies: Tuple[str, ...],\n save_dir: Optional[str] = None,\n) -> dict:\n \"\"\"Either computes the statistics of a dataset or loads them from a cache file if this function has been\n called before with the same `hash_dependencies`. Currently, the statistics include the min/max/mean/std of\n the actions and proprio as well as the number of transitions and trajectories in the dataset.\n \"\"\"\n unique_hash = hashlib.sha256(\n \"\".join(hash_dependencies).encode(\"utf-8\"),\n usedforsecurity=False,\n ).hexdigest()\n\n # fallback local path for when data_dir is not writable or not provided\n local_path = os.path.expanduser(\n os.path.join(\n \"~\",\n \".cache\",\n \"octo\",\n f\"dataset_statistics_{unique_hash}.json\",\n )\n )\n\n if save_dir is not None:\n path = tf.io.gfile.join(save_dir, f\"dataset_statistics_{unique_hash}.json\")\n else:\n path = local_path\n\n # check if cache file exists and load\n if tf.io.gfile.exists(path):\n logging.info(f\"Loading existing dataset statistics from {path}.\")\n with tf.io.gfile.GFile(path, \"r\") as f:\n metadata = json.load(f)\n return metadata\n\n if os.path.exists(local_path):\n logging.info(f\"Loading existing dataset statistics from {local_path}.\")\n with open(local_path, \"r\") as f:\n metadata = json.load(f)\n return metadata\n\n dataset = dataset.traj_map(\n lambda traj: {\n \"action\": traj[\"action\"],\n \"proprio\": traj[\"observation\"][\"proprio\"]\n if \"proprio\" in traj[\"observation\"]\n else tf.zeros_like(traj[\"action\"]),\n }\n )\n\n cardinality = dataset.cardinality().numpy()\n if cardinality == tf.data.INFINITE_CARDINALITY:\n raise ValueError(\"Cannot compute dataset statistics for infinite datasets.\")\n\n logging.info(\n \"Computing dataset statistics. This may take awhile, but should only need to happen \"\n \"once for each dataset.\"\n )\n actions = []\n proprios = []\n num_transitions = 0\n num_trajectories = 0\n for traj in tqdm.tqdm(\n dataset.iterator(),\n total=cardinality if cardinality != tf.data.UNKNOWN_CARDINALITY else None,\n ):\n actions.append(traj[\"action\"])\n proprios.append(traj[\"proprio\"])\n num_transitions += traj[\"action\"].shape[0]\n num_trajectories += 1\n actions = np.concatenate(actions)\n proprios = np.concatenate(proprios)\n metadata = {\n \"action\": {\n \"mean\": actions.mean(0).tolist(),\n \"std\": actions.std(0).tolist(),\n \"max\": actions.max(0).tolist(),\n \"min\": actions.min(0).tolist(),\n },\n \"proprio\": {\n \"mean\": proprios.mean(0).tolist(),\n \"std\": proprios.std(0).tolist(),\n \"max\": proprios.max(0).tolist(),\n \"min\": proprios.min(0).tolist(),\n },\n \"num_transitions\": num_transitions,\n \"num_trajectories\": num_trajectories,\n }\n\n try:\n with tf.io.gfile.GFile(path, \"w\") as f:\n json.dump(metadata, f)\n except tf.errors.PermissionDeniedError:\n logging.warning(\n f\"Could not write dataset statistics to {path}. \"\n f\"Writing to {local_path} instead.\"\n )\n os.makedirs(os.path.dirname(local_path), exist_ok=True)\n with open(local_path, \"w\") as f:\n json.dump(metadata, f)\n\n return metadata"
},
{
"identifier": "NormalizationType",
"path": "octo/data/utils/data_utils.py",
"snippet": "class NormalizationType(str, Enum):\n \"\"\"Defines supported normalization schemes for action and proprio.\"\"\"\n\n NORMAL = \"normal\" # normalize to mean 0, std 1\n BOUNDS = \"bounds\" # normalize to [-1, 1]"
},
{
"identifier": "normalize_action_and_proprio",
"path": "octo/data/utils/data_utils.py",
"snippet": "def normalize_action_and_proprio(\n traj: dict, metadata: dict, normalization_type: NormalizationType\n):\n \"\"\"Normalizes the action and proprio fields of a trajectory using the given metadata.\"\"\"\n # maps keys of `metadata` to corresponding keys in `traj`\n keys_to_normalize = {\n \"action\": \"action\",\n \"proprio\": \"observation/proprio\",\n }\n if normalization_type == NormalizationType.NORMAL:\n # normalize to mean 0, std 1\n for key, traj_key in keys_to_normalize.items():\n mask = metadata[key].get(\n \"mask\", tf.ones_like(metadata[key][\"mean\"], dtype=tf.bool)\n )\n traj = dl.transforms.selective_tree_map(\n traj,\n match=lambda k, _: k == traj_key,\n map_fn=lambda x: tf.where(\n mask, (x - metadata[key][\"mean\"]) / (metadata[key][\"std\"] + 1e-8), x\n ),\n )\n return traj\n\n if normalization_type == NormalizationType.BOUNDS:\n # normalize to [-1, 1]\n for key, traj_key in keys_to_normalize.items():\n mask = metadata[key].get(\n \"mask\", tf.ones_like(metadata[key][\"min\"], dtype=tf.bool)\n )\n traj = dl.transforms.selective_tree_map(\n traj,\n match=lambda k, _: k == traj_key,\n map_fn=lambda x: tf.where(\n mask,\n tf.clip_by_value(\n 2\n * (x - metadata[key][\"min\"])\n / (metadata[key][\"max\"] - metadata[key][\"min\"] + 1e-8)\n - 1,\n -1,\n 1,\n ),\n x,\n ),\n )\n return traj\n\n raise ValueError(f\"Unknown normalization type {normalization_type}\")"
},
{
"identifier": "pprint_data_mixture",
"path": "octo/data/utils/data_utils.py",
"snippet": "def pprint_data_mixture(\n dataset_kwargs_list: List[Dict[str, Any]], dataset_weights: List[int]\n) -> None:\n print(\n \"\\n######################################################################################\"\n )\n print(\n f\"# Loading the following {len(dataset_kwargs_list)} datasets (incl. sampling weight):{'': >24} #\"\n )\n for dataset_kwargs, weight in zip(dataset_kwargs_list, dataset_weights):\n pad = 80 - len(dataset_kwargs[\"name\"])\n print(f\"# {dataset_kwargs['name']}: {weight:=>{pad}f} #\")\n print(\n \"######################################################################################\\n\"\n )"
},
{
"identifier": "tree_map",
"path": "octo/data/utils/data_utils.py",
"snippet": "def tree_map(fn: Callable, tree: dict) -> dict:\n \"\"\"Maps a function over a nested dictionary.\"\"\"\n return {\n k: tree_map(fn, v) if isinstance(v, dict) else fn(v) for k, v in tree.items()\n }"
}
] | from functools import partial
from typing import Callable, Mapping, Optional, Sequence, Tuple, Union
from absl import logging
from octo.data import obs_transforms, traj_transforms
from octo.data.utils import goal_relabeling, task_augmentation
from octo.data.utils.data_utils import (
allocate_threads,
get_dataset_statistics,
NormalizationType,
normalize_action_and_proprio,
pprint_data_mixture,
tree_map,
)
import inspect
import json
import dlimp as dl
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds | 4,055 | this length (after goal relabeling and chunking).
skip_unlabeled (bool, optional): Whether to skip trajectories with no language labels.
max_action: (float, optional): If provided, trajectories in which *any* action dimension
of *any* transition has an absolute value larger than this will be skipped.
max_proprio: (float, optional): If provided, trajectories in which *any* proprio dimension
of *any* transition has an absolute value larger than this will be skipped.
task_augment_strategy (str, optional): The task augmentation strategy to use, or None for no task
augmentation. See `task_augmentation.py`.
task_augment_kwargs (dict, optional): Additional keyword arguments to pass to the task augmentation
function.
num_parallel_calls (int, optional): number of parallel calls for map operations. Default to AUTOTUNE.
"""
if skip_unlabeled:
if "language_instruction" not in dataset.element_spec["task"]:
raise ValueError(
"skip_unlabeled=True but dataset does not have language labels."
)
dataset = dataset.filter(
lambda x: tf.math.reduce_any(x["task"]["language_instruction"] != "")
)
if max_action is not None:
dataset = dataset.filter(
lambda x: tf.math.reduce_all(tf.math.abs(x["action"]) <= max_action)
)
if max_proprio is not None and "proprio" in dataset.element_spec["observation"]:
dataset = dataset.filter(
lambda x: tf.math.reduce_all(
tf.math.abs(x["observation"]["proprio"]) <= max_proprio
)
)
    # marks which entries of the observation and task dicts are padding
dataset = dataset.traj_map(traj_transforms.add_pad_mask_dict, num_parallel_calls)
# updates the "task" dict
if goal_relabeling_strategy is not None:
dataset = dataset.traj_map(
partial(
getattr(goal_relabeling, goal_relabeling_strategy),
**goal_relabeling_kwargs,
),
num_parallel_calls,
)
# must run task augmentation before chunking, in case it changes goal timesteps
if train and task_augment_strategy is not None:
# perform task augmentation (e.g., dropping keys)
dataset = dataset.traj_map(
partial(
getattr(task_augmentation, task_augment_strategy),
**task_augment_kwargs,
),
num_parallel_calls,
)
# chunks observations and actions, giving them a new axis at index 1 of size `window_size` and
# `window_size + future_action_window_size`, respectively
dataset = dataset.traj_map(
partial(
traj_transforms.chunk_act_obs,
window_size=window_size,
future_action_window_size=future_action_window_size,
),
num_parallel_calls,
)
if train and subsample_length is not None:
dataset = dataset.traj_map(
partial(traj_transforms.subsample, subsample_length=subsample_length),
num_parallel_calls,
)
return dataset
def apply_frame_transforms(
dataset: dl.DLataset,
*,
train: bool,
image_augment_kwargs: Union[dict, Mapping[str, dict]] = {},
resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]] = {},
depth_resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]] = {},
num_parallel_calls: int = tf.data.AUTOTUNE,
) -> dl.DLataset:
"""Applies common transforms that happen at a frame level. These transforms are usually more
    CPU-intensive (e.g. decoding or resizing images).
Args:
train (bool): Whether the dataset is for training (affects image augmentation).
dataset (dl.DLataset): The dataset to transform.
image_augment_kwargs (dict|Mapping[str, dict]): Keyword arguments to pass to the image augmentation
function. See `dlimp.transforms.augment_image` for documentation of these kwargs. If a dict of
dicts is provided, then key "k" will be used for "image_{k}" (names determined by `image_obs_keys`
in `make_dataset_from_rlds`). Augmentation will be skipped for missing keys (so pass an empty dict
to skip augmentation for all images).
resize_size (Tuple[int, int]|Mapping[str, Tuple[int, int]]): If provided, images will be resized to
this size. If a dict of tuples is provided, then key "k" will be used for "image_{k}" (names
determined by `image_obs_keys` in `make_dataset_from_rlds`). Resizing will be skipped for missing
keys (so pass an empty dict to skip resizing for all images).
depth_resize_size (Tuple[int, int]|Mapping[str, Tuple[int, int]]): Same as resize_size, but for depth
images.
num_parallel_calls (int): number of parallel calls for frame_map operations. Default to AUTOTUNE.
"""
# convenience wrapper that takes a function that operates on a non-chunked "observation" dict and applies
# it to the chunked "observation" dict as well as the non-chunked "task" dict
def apply_obs_transform(fn: Callable[[dict], dict], frame: dict) -> dict:
# task is not chunked -- apply fn directly
frame["task"] = fn(frame["task"])
# observation is chunked -- apply fn along first axis
frame["observation"] = dl.vmap(fn)(frame["observation"])
return frame
# decode + resize images (and depth images)
dataset = dataset.frame_map(
partial(
apply_obs_transform,
partial(
|
def apply_trajectory_transforms(
dataset: dl.DLataset,
*,
train: bool,
goal_relabeling_strategy: Optional[str] = None,
goal_relabeling_kwargs: dict = {},
window_size: int = 1,
future_action_window_size: int = 0,
subsample_length: Optional[int] = None,
skip_unlabeled: bool = False,
max_action: Optional[float] = None,
max_proprio: Optional[float] = None,
task_augment_strategy: Optional[str] = None,
task_augment_kwargs: dict = {},
num_parallel_calls: int = tf.data.AUTOTUNE,
) -> dl.DLataset:
"""Applies common transforms that happen at a trajectory level. Such transforms are usually some sort of
"relabeling" (e.g. filtering, chunking, adding goals, dropping keys). Transforms that happen in this
function should have the following properties:
- They require access to an entire trajectory (i.e. they cannot be applied in a frame-wise manner).
- They are generally not CPU-intensive, mostly involving moving and copying data.
- They do not require decoded images.
Args:
dataset (dl.DLataset): The dataset to transform.
train (bool): Whether the dataset is for training (affects subsampling).
goal_relabeling_strategy (str, optional): The goal relabeling strategy to use, or None for
no goal relabeling. See `goal_relabeling.py`.
goal_relabeling_kwargs (dict, optional): Additional keyword arguments to pass to the goal relabeling function.
window_size (int, optional): The length of the snippets that trajectories are chunked into.
future_action_window_size (int, optional): The number of future actions beyond window_size to include
in the chunked actions.
subsample_length (int, optional): If provided, trajectories longer than this will be subsampled to
this length (after goal relabeling and chunking).
skip_unlabeled (bool, optional): Whether to skip trajectories with no language labels.
        max_action (float, optional): If provided, trajectories in which *any* action dimension
            of *any* transition has an absolute value larger than this will be skipped.
        max_proprio (float, optional): If provided, trajectories in which *any* proprio dimension
            of *any* transition has an absolute value larger than this will be skipped.
task_augment_strategy (str, optional): The task augmentation strategy to use, or None for no task
augmentation. See `task_augmentation.py`.
task_augment_kwargs (dict, optional): Additional keyword arguments to pass to the task augmentation
function.
num_parallel_calls (int, optional): number of parallel calls for map operations. Default to AUTOTUNE.
"""
if skip_unlabeled:
if "language_instruction" not in dataset.element_spec["task"]:
raise ValueError(
"skip_unlabeled=True but dataset does not have language labels."
)
dataset = dataset.filter(
lambda x: tf.math.reduce_any(x["task"]["language_instruction"] != "")
)
if max_action is not None:
dataset = dataset.filter(
lambda x: tf.math.reduce_all(tf.math.abs(x["action"]) <= max_action)
)
if max_proprio is not None and "proprio" in dataset.element_spec["observation"]:
dataset = dataset.filter(
lambda x: tf.math.reduce_all(
tf.math.abs(x["observation"]["proprio"]) <= max_proprio
)
)
    # marks which entries of the observation and task dicts are padding
dataset = dataset.traj_map(traj_transforms.add_pad_mask_dict, num_parallel_calls)
# updates the "task" dict
if goal_relabeling_strategy is not None:
dataset = dataset.traj_map(
partial(
getattr(goal_relabeling, goal_relabeling_strategy),
**goal_relabeling_kwargs,
),
num_parallel_calls,
)
# must run task augmentation before chunking, in case it changes goal timesteps
if train and task_augment_strategy is not None:
# perform task augmentation (e.g., dropping keys)
dataset = dataset.traj_map(
partial(
getattr(task_augmentation, task_augment_strategy),
**task_augment_kwargs,
),
num_parallel_calls,
)
# chunks observations and actions, giving them a new axis at index 1 of size `window_size` and
# `window_size + future_action_window_size`, respectively
dataset = dataset.traj_map(
partial(
traj_transforms.chunk_act_obs,
window_size=window_size,
future_action_window_size=future_action_window_size,
),
num_parallel_calls,
)
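    # Illustrative shapes (assumed example values): with window_size=2 and
    # future_action_window_size=1, each trajectory of length T leaves this
    # traj_map with observations of shape [T, 2, ...] and actions of shape
    # [T, 3, action_dim].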
if train and subsample_length is not None:
dataset = dataset.traj_map(
partial(traj_transforms.subsample, subsample_length=subsample_length),
num_parallel_calls,
)
return dataset
def apply_frame_transforms(
dataset: dl.DLataset,
*,
train: bool,
image_augment_kwargs: Union[dict, Mapping[str, dict]] = {},
resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]] = {},
depth_resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]] = {},
num_parallel_calls: int = tf.data.AUTOTUNE,
) -> dl.DLataset:
"""Applies common transforms that happen at a frame level. These transforms are usually more
    CPU-intensive (e.g. decoding or resizing images).
Args:
train (bool): Whether the dataset is for training (affects image augmentation).
dataset (dl.DLataset): The dataset to transform.
image_augment_kwargs (dict|Mapping[str, dict]): Keyword arguments to pass to the image augmentation
function. See `dlimp.transforms.augment_image` for documentation of these kwargs. If a dict of
dicts is provided, then key "k" will be used for "image_{k}" (names determined by `image_obs_keys`
in `make_dataset_from_rlds`). Augmentation will be skipped for missing keys (so pass an empty dict
to skip augmentation for all images).
resize_size (Tuple[int, int]|Mapping[str, Tuple[int, int]]): If provided, images will be resized to
this size. If a dict of tuples is provided, then key "k" will be used for "image_{k}" (names
determined by `image_obs_keys` in `make_dataset_from_rlds`). Resizing will be skipped for missing
keys (so pass an empty dict to skip resizing for all images).
depth_resize_size (Tuple[int, int]|Mapping[str, Tuple[int, int]]): Same as resize_size, but for depth
images.
num_parallel_calls (int): number of parallel calls for frame_map operations. Default to AUTOTUNE.
"""
# convenience wrapper that takes a function that operates on a non-chunked "observation" dict and applies
# it to the chunked "observation" dict as well as the non-chunked "task" dict
def apply_obs_transform(fn: Callable[[dict], dict], frame: dict) -> dict:
# task is not chunked -- apply fn directly
frame["task"] = fn(frame["task"])
# observation is chunked -- apply fn along first axis
frame["observation"] = dl.vmap(fn)(frame["observation"])
return frame
# decode + resize images (and depth images)
dataset = dataset.frame_map(
partial(
apply_obs_transform,
partial( | obs_transforms.decode_and_resize, | 0 | 2023-12-13 09:58:56+00:00 | 8k |
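A minimal usage sketch of the two transform stages documented in the record above. The kwarg values, the "uniform" strategy name, and the input trajectory dataset are illustrative assumptions, not values taken from this file:

def build_frame_dataset(dataset, train=True):
    # `dataset` is assumed to be a dl.DLataset of full trajectories
    # (e.g. the output of make_dataset_from_rlds mentioned in the docstrings).
    dataset = apply_trajectory_transforms(
        dataset,
        train=train,
        goal_relabeling_strategy="uniform",   # assumed strategy name in goal_relabeling.py
        window_size=2,
        future_action_window_size=3,
        subsample_length=100,
    )
    dataset = dataset.flatten()               # trajectories -> individual frames (dlimp API)
    dataset = apply_frame_transforms(
        dataset,
        train=train,
        resize_size=(256, 256),               # applied to every "image_*" observation key
        image_augment_kwargs={"random_brightness": [0.1]},
    )
    return dataset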
LinShan-Bin/OccNeRF | networks/occupancy_decoder.py | [
{
"identifier": "geom",
"path": "utils/geom.py",
"snippet": "def eye_4x4(B, device='cuda'):\ndef safe_inverse(a): #parallel version\ndef safe_inverse_single(a):\ndef apply_4x4(RT, xyz):\ndef get_camM_T_camXs(origin_T_camXs, ind=0):\ndef split_rt_single(rt):\ndef split_rt(rt):\ndef merge_rt(r, t):\ndef xyd2pointcloud(xyd, pix_T_cam):\ndef pixels2camera(x, y, z, fx, fy, x0, y0):\ndef camera2pixels(xyz, pix_T_cam):\ndef scale_intrinsics(K, sx, sy):\ndef split_intrinsics(K):\ndef merge_intrinsics(fx, fy, x0, y0):\ndef merge_rtlist(rlist, tlist):\ndef split_lrtlist(lrtlist):\ndef merge_lrtlist(lenlist, rtlist):\ndef apply_4x4_to_lrtlist(Y_T_X, lrtlist_X):\ndef apply_4x4_to_lrt(Y_T_X, lrt_X):\ndef get_xyzlist_from_lenlist(lenlist):\ndef get_xyzlist_from_lrtlist(lrtlist, include_clist=False):\ndef get_clist_from_lrtlist(lrtlist):\ndef wrap2pi(rad_angle):\ndef unproject(cam2world, intrinsic, depth):\ndef reproject(cam2world_src, cam2world_tar, W, H, intrinsic, depth_src, depth_tar, color_tar, mask_tar):\n def make_grid(x, y):\ndef visualize_depth(depth, mask=None, depth_min=None, depth_max=None, direct=False):\ndef mat2pose_vec(matrix: torch.Tensor):\ndef square_distance(src, dst):\n B, _, _ = list(a.shape)\n B, N, _ = list(xyz.shape)\n B, S = list(origin_T_camXs.shape)[0:2]\n B, C, D = list(r.shape)\n B2, D2 = list(t.shape)\n B, N, C = list(xyd.shape)\n B = x.shape[0]\n B = list(z.shape)[0]\n EPS = 1e-4\n K = merge_intrinsics(fx, fy, x0, y0)\n B = list(fx.shape)[0]\n K = torch.zeros(B, 4, 4, dtype=torch.float32, device=fx.device)\n K[:,0,0] = fx\n K[:,1,1] = fy\n K[:,0,2] = x0\n K[:,1,2] = y0\n K[:,2,2] = 1.0\n K[:,3,3] = 1.0\n B, N, D, E = list(rlist.shape)\n B, N, F = list(tlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, D = list(lenlist.shape)\n B2, N2, E, F = list(rtlist.shape)\n B, N, D = list(lrtlist_X.shape)\n B2, E, F = list(Y_T_X.shape)\n B, D = list(lrt_X.shape)\n B2, E, F = list(Y_T_X.shape)\n B, N, D = list(lenlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, D = list(lrtlist.shape)\n B, N, _ = src.shape\n _, M, _ = dst.shape"
},
{
"identifier": "vox",
"path": "utils/vox.py",
"snippet": "def world2contracted(xyz_world, pc_range_roi=[-52, -52, 0, 52, 52, 6], ratio=0.8):\ndef contracted2world(xyz_contracted, pc_range_roi=[-80, -80, -3, 80, 80, 8], ratio=0.8):\n def __init__(self, Z, Y, X, scene_centroid, bounds, position = 'embedding', length_pose_encoding = 3, opt = None, pad=None, assert_cube=False):\n def Ref2Mem(self, xyz, Z, Y, X, assert_cube=False):\n def Mem2Ref(self, xyz_mem, Z, Y, X, assert_cube=False):\n def get_mem_T_ref(self, B, Z, Y, X, assert_cube=False, device='cuda'):\n def get_ref_T_mem(self, B, Z, Y, X, assert_cube=False, device='cuda'):\n def get_inbounds(self, xyz, Z, Y, X, already_mem=False, padding=0.0, assert_cube=False):\n def voxelize_xyz(self, xyz_ref, Z, Y, X, already_mem=False, assert_cube=False, clean_eps=0):\n def voxelize_xyz_and_feats(self, xyz_ref, feats, Z, Y, X, already_mem=False, assert_cube=False, clean_eps=0):\n def get_occupancy(self, xyz, Z, Y, X, clean_eps=0, xyz_zero=None):\n def get_feat_occupancy(self, xyz, feat, Z, Y, X, clean_eps=0, xyz_zero=None):\n def unproject_image_to_mem(self, rgb_camB, pixB_T_camA, camB_T_camA, Z, Y, X, assert_cube=False):\n def get_meta_data(self, cam_center, camB_T_camA = None, abs_position=False, assert_cube=False):\n def get_voxel_position(self, cam_center, abs_position=True, assert_cube=False):\n def apply_mem_T_ref_to_lrtlist(self, lrtlist_cam, Z, Y, X, assert_cube=False):\nclass Vox_util(nn.Module):\n B, N, C = list(xyz.shape)\n B, N, C = list(xyz_mem.shape)\n B, N, D = list(xyz_ref.shape)\n B, N, D = list(xyz_ref.shape)\n B2, N2, D2 = list(feats.shape)\n B, N, C = list(xyz.shape)\n B, N, C = list(xyz.shape)\n B2, N2, D2 = list(feat.shape)\n B, C, H, W = list(rgb_camB.shape)\n EPS=1e-6\n Z, Y, X = self.Z, self.Y, self.X\n Z, Y, X = self.Z, self.Y, self.X\n B, N, C = list(lrtlist_cam.shape)"
},
{
"identifier": "basic",
"path": "utils/basic.py",
"snippet": "EPS = 1e-6\n B_, S = shapelist[:2]\n BS = shapelist[0]\n S = int(BS/B)\ndef strnum(x):\ndef matmul2(mat1, mat2):\ndef pack_seqdim(tensor, B):\ndef unpack_seqdim(tensor, B):\ndef reduce_masked_mean(x, mask, dim=None, keepdim=False):\ndef meshgrid3d(B, Z, Y, X, stack=False, norm=False, device='cuda'):\ndef gridcloud3d(B, Z, Y, X, norm=False, device='cuda'):\ndef normalize_grid2d(grid_y, grid_x, Y, X, clamp_extreme=True):"
},
{
"identifier": "render",
"path": "utils/render.py",
"snippet": "def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):\ndef ndc_rays(H, W, focal, near, rays_o, rays_d):\ndef get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):\ndef cumprod_exclusive(p):\ndef get_ray_marching_ray(alpha):\ndef sample_ray(self, rays_o, rays_d, near, far, stepsize, xyz_min, xyz_max, voxel_size, is_train=False):\n def __init__(self, init_val, beta_min=0.0001):\n def forward(self, sdf, beta=None):\n def get_beta(self):\n def __init__(self, init_val, beta_min=0.0001):\n def forward(self, sdf, beta=None):\n def get_beta(self):\n def __init__(self, init_val):\n def forward(self, x):\n def get_variance(self):\nclass SigmoidDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\nclass LaplaceDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).cdf(-sdf)\nclass SingleVarianceNetwork(nn.Module):"
},
{
"identifier": "S3DCNN",
"path": "networks/_3DCNN.py",
"snippet": "class S3DCNN(nn.Module):\n def __init__(self, input_planes = 64, out_planes = 1, planes = 16, conv_3d_types1 = \"3D\", activate_fun = nn.ReLU(inplace=True), opt = None):\n super(S3DCNN, self).__init__()\n self.out_planes = out_planes\n\n self.opt = opt\n self.dres0 = nn.Sequential(convbn_3d(input_planes, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun,\n convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun)\n\n\n self.dres1 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun,\n convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1))\n\n self.dres2 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)\n\n self.dres3 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)\n\n self.dres4 = hourglass_PSMNet(planes*2, conv_3d_types1 = conv_3d_types1, activate_fun = activate_fun)\n\n\n self.classif1 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun,\n nn.Conv3d(planes*2, out_planes, kernel_size=3, padding=1, stride=1,bias=False))\n\n self.classif2 = nn.Sequential(convbn_3d(planes*2, planes*2, 3, 1, 1, conv_3d_types = conv_3d_types1),\n activate_fun,\n nn.Conv3d(planes*2, out_planes, kernel_size=3, padding=1, stride=1,bias=False))\n\n\n\n self.classif3 = nn.Sequential(convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, self.out_planes, 3, 1, 1, conv_3d_types=conv_3d_types1),)\n if self.opt.use_semantic:\n self.classif_semantic = nn.Sequential(convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, planes * 2, 3, 1, 1, conv_3d_types=conv_3d_types1),\n activate_fun,\n\n convbn_3d(planes * 2, self.opt.semantic_classes, 3, 1, 1, conv_3d_types=conv_3d_types1),)\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n if self.opt.render_type == 'density':\n pass\n \n def geo_param(self):\n return list(self.dres0.parameters()) + \\\n list(self.dres1.parameters()) + \\\n list(self.dres2.parameters()) + \\\n list(self.dres3.parameters()) + \\\n list(self.dres4.parameters()) + \\\n list(self.classif1.parameters()) + \\\n list(self.classif2.parameters()) + \\\n list(self.classif3.parameters())\n \n def sem_head_param(self):\n if self.opt.use_semantic:\n return self.classif_semantic.parameters()\n else:\n return None\n\n def forward(self, cost):\n\n cost0 = self.dres0(cost)\n cost0 = self.dres1(cost0) + cost0\n\n out1, pre1, post1 = self.dres2(cost0, None, None)\n\n out1 = out1+cost0\n\n out2, pre2, post2 = self.dres3(out1, pre1, post1)\n out2 = out2+cost0\n\n out3, pre3, post3 = self.dres4(out2, pre1, post2)\n\n if self.opt.use_semantic:\n if self.opt.last_free:\n out = self.classif_semantic(out3)\n else:\n semantic = self.classif_semantic(out3)\n cost3 = self.classif3(out3)\n out = torch.cat([semantic, cost3], dim=1)\n return [out]\n else:\n cost3 = self.classif3(out3)\n return [cost3]"
}
] | import pdb
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch_efficient_distloss import eff_distloss, eff_distloss_native
from utils import geom
from utils import vox
from utils import basic
from utils import render
from ._3DCNN import S3DCNN | 5,390 | self.Z_final = self.Z
self.Y_final = self.Y
self.X_final = self.X
self.stepsize = self.opt.stepsize # voxel
self.num_voxels = self.Z_final * self.Y_final * self.X_final
self.stepsize_log = self.stepsize
self.interval = self.stepsize
if self.opt.contracted_coord:
# Sampling strategy for contracted coordinate
contracted_rate = self.opt.contracted_ratio
num_id_voxels = int(self.num_voxels * (contracted_rate)**3)
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_id_voxels).pow(1 / 3)
diagonal = (self.xyz_max - self.xyz_min).pow(2).sum().pow(1 / 2)
self.N_samples = int(diagonal / 2 / self.stepsize / self.voxel_size / contracted_rate)
if self.opt.infinite_range:
# depth_roi = [-self.far] * 3 + [self.far] * 3
zval_roi = [-diagonal] * 3 + [diagonal] * 3
fc = 1 - 0.5 / self.X # avoid NaN
zs_contracted = torch.linspace(0.0, fc, steps=self.N_samples)
zs_world = vox.contracted2world(
zs_contracted[None, :, None].repeat(1, 1, 3),
# pc_range_roi=depth_roi,
pc_range_roi=zval_roi,
ratio=self.opt.contracted_ratio)[:, :, 0]
else:
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
pc_range_roi = self.xyz_min.tolist() + self.xyz_max.tolist()
self.norm_func = lambda xyz: vox.world2contracted(xyz, pc_range_roi=pc_range_roi, ratio=self.opt.contracted_ratio)
else:
self.N_samples = int(np.linalg.norm(np.array([self.Z_final // 2, self.Y_final // 2, self.X_final // 2]) + 1) / self.stepsize) + 1
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels).pow(1 / 3)
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
self.norm_func = lambda xyz: (xyz - self.xyz_min.to(xyz)) / (self.xyz_max.to(xyz) - self.xyz_min.to(xyz)) * 2.0 - 1.0
length_pose_encoding = 3
if self.opt.position == 'embedding':
input_channel = self.opt.input_channel
self.pos_embedding = torch.nn.Parameter(torch.ones(
[1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]))
elif self.opt.position == 'embedding1':
input_channel = self.opt.input_channel
xyz_in_channels = 1 + 3
embedding_width = 192
embedding_depth = 5
self.embeddingnet = nn.Sequential(
nn.Linear(xyz_in_channels, embedding_width), nn.ReLU(inplace=True),
*[nn.Sequential(nn.Linear(embedding_width, embedding_width), nn.ReLU(inplace=True))
for _ in range(embedding_depth - 2)], nn.Linear(embedding_width, self.opt.input_channel),)
nn.init.constant_(self.embeddingnet[-1].bias, 0)
self.pos_embedding1 = None
self.pos_embedding_save = torch.nn.Parameter(torch.zeros([1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]), requires_grad= False)
else:
self.pos_embedding = None
self.pos_embedding1 = None
input_channel = self.opt.input_channel
scene_centroid_x = 0.0
scene_centroid_y = 0.0
scene_centroid_z = 0.0
scene_centroid = np.array([scene_centroid_x,
scene_centroid_y,
scene_centroid_z]).reshape([1, 3])
self.register_buffer('scene_centroid', torch.from_numpy(scene_centroid).float())
self.bounds = (self.opt.real_size[0], self.opt.real_size[1],
self.opt.real_size[2], self.opt.real_size[3],
self.opt.real_size[4], self.opt.real_size[5])
# bounds = (-40, 40, -40, 40, -1, 5.4)
self.vox_util = vox.Vox_util(
self.Z, self.Y, self.X,
scene_centroid=self.scene_centroid,
bounds=self.bounds, position = self.opt.position, length_pose_encoding = length_pose_encoding, opt = self.opt,
assert_cube=False)
if self.opt.position != 'No' and self.opt.position != 'embedding':
self.meta_data = self.vox_util.get_meta_data(cam_center=torch.Tensor([[1.2475, 0.0673, 1.5356]]), camB_T_camA=None).to('cuda')
activate_fun = nn.ReLU(inplace=True)
if self.opt.aggregation == '3dcnn':
out_channel = self.opt.out_channel
self._3DCNN = S3DCNN(input_planes=input_channel, out_planes=out_channel, planes=self.opt.con_channel,
activate_fun=activate_fun, opt=opt)
else:
print('please define the aggregation')
exit()
def feature2vox_simple(self, features, pix_T_cams, cam0_T_camXs, __p, __u):
pix_T_cams_ = pix_T_cams
camXs_T_cam0_ = geom.safe_inverse(cam0_T_camXs)
_, C, Hf, Wf = features.shape
sy = Hf / float(self.opt.height)
sx = Wf / float(self.opt.width)
# unproject image feature to 3d grid
featpix_T_cams_ = geom.scale_intrinsics(pix_T_cams_, sx, sy)
# pix_T_cams_ shape: [6,4,4] feature down sample -> featpix_T_cams_
feat_mems_ = self.vox_util.unproject_image_to_mem(
features,
| # Copyright Niantic 2019. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the Monodepth2 licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
from __future__ import absolute_import, division, print_function
class VolumeDecoder(nn.Module):
def __init__(self, opt):
super(VolumeDecoder, self).__init__()
self.opt = opt
self.use_semantic = self.opt.use_semantic
self.semantic_classes = self.opt.semantic_classes
self.batch = self.opt.batch_size // self.opt.cam_N
self.near = self.opt.min_depth
self.far = self.opt.max_depth
self.register_buffer('xyz_min', torch.from_numpy(
np.array([self.opt.real_size[0], self.opt.real_size[2], self.opt.real_size[4]])))
self.register_buffer('xyz_max', torch.from_numpy(
np.array([self.opt.real_size[1], self.opt.real_size[3], self.opt.real_size[5]])))
self.ZMAX = self.opt.real_size[1]
self.Z = self.opt.voxels_size[0]
self.Y = self.opt.voxels_size[1]
self.X = self.opt.voxels_size[2]
self.Z_final = self.Z
self.Y_final = self.Y
self.X_final = self.X
self.stepsize = self.opt.stepsize # voxel
self.num_voxels = self.Z_final * self.Y_final * self.X_final
self.stepsize_log = self.stepsize
self.interval = self.stepsize
if self.opt.contracted_coord:
# Sampling strategy for contracted coordinate
contracted_rate = self.opt.contracted_ratio
num_id_voxels = int(self.num_voxels * (contracted_rate)**3)
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_id_voxels).pow(1 / 3)
diagonal = (self.xyz_max - self.xyz_min).pow(2).sum().pow(1 / 2)
self.N_samples = int(diagonal / 2 / self.stepsize / self.voxel_size / contracted_rate)
if self.opt.infinite_range:
# depth_roi = [-self.far] * 3 + [self.far] * 3
zval_roi = [-diagonal] * 3 + [diagonal] * 3
fc = 1 - 0.5 / self.X # avoid NaN
zs_contracted = torch.linspace(0.0, fc, steps=self.N_samples)
zs_world = vox.contracted2world(
zs_contracted[None, :, None].repeat(1, 1, 3),
# pc_range_roi=depth_roi,
pc_range_roi=zval_roi,
ratio=self.opt.contracted_ratio)[:, :, 0]
else:
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
pc_range_roi = self.xyz_min.tolist() + self.xyz_max.tolist()
self.norm_func = lambda xyz: vox.world2contracted(xyz, pc_range_roi=pc_range_roi, ratio=self.opt.contracted_ratio)
else:
self.N_samples = int(np.linalg.norm(np.array([self.Z_final // 2, self.Y_final // 2, self.X_final // 2]) + 1) / self.stepsize) + 1
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels).pow(1 / 3)
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
self.norm_func = lambda xyz: (xyz - self.xyz_min.to(xyz)) / (self.xyz_max.to(xyz) - self.xyz_min.to(xyz)) * 2.0 - 1.0
length_pose_encoding = 3
if self.opt.position == 'embedding':
input_channel = self.opt.input_channel
self.pos_embedding = torch.nn.Parameter(torch.ones(
[1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]))
elif self.opt.position == 'embedding1':
input_channel = self.opt.input_channel
xyz_in_channels = 1 + 3
embedding_width = 192
embedding_depth = 5
self.embeddingnet = nn.Sequential(
nn.Linear(xyz_in_channels, embedding_width), nn.ReLU(inplace=True),
*[nn.Sequential(nn.Linear(embedding_width, embedding_width), nn.ReLU(inplace=True))
for _ in range(embedding_depth - 2)], nn.Linear(embedding_width, self.opt.input_channel),)
nn.init.constant_(self.embeddingnet[-1].bias, 0)
self.pos_embedding1 = None
self.pos_embedding_save = torch.nn.Parameter(torch.zeros([1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]), requires_grad= False)
else:
self.pos_embedding = None
self.pos_embedding1 = None
input_channel = self.opt.input_channel
scene_centroid_x = 0.0
scene_centroid_y = 0.0
scene_centroid_z = 0.0
scene_centroid = np.array([scene_centroid_x,
scene_centroid_y,
scene_centroid_z]).reshape([1, 3])
self.register_buffer('scene_centroid', torch.from_numpy(scene_centroid).float())
self.bounds = (self.opt.real_size[0], self.opt.real_size[1],
self.opt.real_size[2], self.opt.real_size[3],
self.opt.real_size[4], self.opt.real_size[5])
# bounds = (-40, 40, -40, 40, -1, 5.4)
self.vox_util = vox.Vox_util(
self.Z, self.Y, self.X,
scene_centroid=self.scene_centroid,
bounds=self.bounds, position = self.opt.position, length_pose_encoding = length_pose_encoding, opt = self.opt,
assert_cube=False)
if self.opt.position != 'No' and self.opt.position != 'embedding':
self.meta_data = self.vox_util.get_meta_data(cam_center=torch.Tensor([[1.2475, 0.0673, 1.5356]]), camB_T_camA=None).to('cuda')
activate_fun = nn.ReLU(inplace=True)
if self.opt.aggregation == '3dcnn':
out_channel = self.opt.out_channel
self._3DCNN = S3DCNN(input_planes=input_channel, out_planes=out_channel, planes=self.opt.con_channel,
activate_fun=activate_fun, opt=opt)
else:
print('please define the aggregation')
exit()
def feature2vox_simple(self, features, pix_T_cams, cam0_T_camXs, __p, __u):
pix_T_cams_ = pix_T_cams
camXs_T_cam0_ = geom.safe_inverse(cam0_T_camXs)
_, C, Hf, Wf = features.shape
sy = Hf / float(self.opt.height)
sx = Wf / float(self.opt.width)
# unproject image feature to 3d grid
featpix_T_cams_ = geom.scale_intrinsics(pix_T_cams_, sx, sy)
# pix_T_cams_ shape: [6,4,4] feature down sample -> featpix_T_cams_
feat_mems_ = self.vox_util.unproject_image_to_mem(
features, | basic.matmul2(featpix_T_cams_, camXs_T_cam0_), | 2 | 2023-12-14 15:00:21+00:00 | 8k |
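The contracted-coordinate branch in the record above maps unbounded world points into a bounded cube via vox.world2contracted before placing ray samples. The following toy, per-axis re-implementation makes the idea concrete (mip-NeRF-360-style contraction); it is an illustrative sketch under assumed defaults, not the repository's exact formula:

import torch

def toy_world2contracted(xyz, pc_range_roi=(-52, -52, 0, 52, 52, 6), ratio=0.8):
    # Normalize the region of interest to [-1, 1] per axis.
    lo = torch.tensor(pc_range_roi[:3], dtype=xyz.dtype, device=xyz.device)
    hi = torch.tensor(pc_range_roi[3:], dtype=xyz.dtype, device=xyz.device)
    x = (xyz - (lo + hi) / 2) / ((hi - lo) / 2)
    mag = x.abs()
    # Inside the ROI: linear map occupying (-ratio, ratio).
    # Outside: 1/|x| falloff that squashes the rest of space into the (ratio, 1) band.
    outside = torch.sign(x) * (1.0 - (1.0 - ratio) / mag.clamp(min=1.0))
    return torch.where(mag <= 1.0, x * ratio, outside)

pts = torch.tensor([[0.0, 0.0, 3.0], [500.0, 0.0, 3.0]])
print(toy_world2contracted(pts))  # the distant x coordinate is compressed toward +1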
Kevin-thu/DiffMorpher | model.py | [
{
"identifier": "get_img",
"path": "utils/model_utils.py",
"snippet": "def get_img(img, resolution=512):\n norm_mean = [0.5, 0.5, 0.5]\n norm_std = [0.5, 0.5, 0.5]\n transform = transforms.Compose([\n transforms.Resize((resolution, resolution)),\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std)\n ])\n img = transform(img)\n return img.unsqueeze(0)"
},
{
"identifier": "slerp",
"path": "utils/model_utils.py",
"snippet": "@torch.no_grad()\ndef slerp(p0, p1, fract_mixing: float, adain=True):\n r\"\"\" Copied from lunarring/latentblending\n Helper function to correctly mix two random variables using spherical interpolation.\n The function will always cast up to float64 for sake of extra 4.\n Args:\n p0: \n First tensor for interpolation\n p1: \n Second tensor for interpolation\n fract_mixing: float \n Mixing coefficient of interval [0, 1]. \n 0 will return in p0\n 1 will return in p1\n 0.x will return a mix between both preserving angular velocity.\n \"\"\"\n if p0.dtype == torch.float16:\n recast_to = 'fp16'\n else:\n recast_to = 'fp32'\n\n p0 = p0.double()\n p1 = p1.double()\n\n if adain:\n mean1, std1 = calc_mean_std(p0)\n mean2, std2 = calc_mean_std(p1)\n mean = mean1 * (1 - fract_mixing) + mean2 * fract_mixing\n std = std1 * (1 - fract_mixing) + std2 * fract_mixing\n \n norm = torch.linalg.norm(p0) * torch.linalg.norm(p1)\n epsilon = 1e-7\n dot = torch.sum(p0 * p1) / norm\n dot = dot.clamp(-1+epsilon, 1-epsilon)\n\n theta_0 = torch.arccos(dot)\n sin_theta_0 = torch.sin(theta_0)\n theta_t = theta_0 * fract_mixing\n s0 = torch.sin(theta_0 - theta_t) / sin_theta_0\n s1 = torch.sin(theta_t) / sin_theta_0\n interp = p0*s0 + p1*s1\n\n if adain:\n interp = F.instance_norm(interp) * std + mean\n\n if recast_to == 'fp16':\n interp = interp.half()\n elif recast_to == 'fp32':\n interp = interp.float()\n\n return interp"
},
{
"identifier": "do_replace_attn",
"path": "utils/model_utils.py",
"snippet": "def do_replace_attn(key: str):\n # return key.startswith('up_blocks.2') or key.startswith('up_blocks.3')\n return key.startswith('up')"
},
{
"identifier": "train_lora",
"path": "utils/lora_utils.py",
"snippet": "def train_lora(image, prompt, save_lora_dir, model_path=None, tokenizer=None, text_encoder=None, vae=None, unet=None, noise_scheduler=None, lora_steps=200, lora_lr=2e-4, lora_rank=16, weight_name=None, safe_serialization=False, progress=tqdm):\n # initialize accelerator\n accelerator = Accelerator(\n gradient_accumulation_steps=1,\n # mixed_precision='fp16'\n )\n set_seed(0)\n\n # Load the tokenizer\n if tokenizer is None:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n subfolder=\"tokenizer\",\n revision=None,\n use_fast=False,\n )\n # initialize the model\n if noise_scheduler is None:\n noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder=\"scheduler\")\n if text_encoder is None:\n text_encoder_cls = import_model_class_from_model_name_or_path(model_path, revision=None)\n text_encoder = text_encoder_cls.from_pretrained(\n model_path, subfolder=\"text_encoder\", revision=None\n )\n if vae is None:\n vae = AutoencoderKL.from_pretrained(\n model_path, subfolder=\"vae\", revision=None\n )\n if unet is None:\n unet = UNet2DConditionModel.from_pretrained(\n model_path, subfolder=\"unet\", revision=None\n )\n\n # set device and dtype\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n vae.requires_grad_(False)\n text_encoder.requires_grad_(False)\n unet.requires_grad_(False)\n\n unet.to(device)\n vae.to(device)\n text_encoder.to(device)\n\n # initialize UNet LoRA\n unet_lora_attn_procs = {}\n for name, attn_processor in unet.attn_processors.items():\n cross_attention_dim = None if name.endswith(\"attn1.processor\") else unet.config.cross_attention_dim\n if name.startswith(\"mid_block\"):\n hidden_size = unet.config.block_out_channels[-1]\n elif name.startswith(\"up_blocks\"):\n block_id = int(name[len(\"up_blocks.\")])\n hidden_size = list(reversed(unet.config.block_out_channels))[block_id]\n elif name.startswith(\"down_blocks\"):\n block_id = int(name[len(\"down_blocks.\")])\n hidden_size = unet.config.block_out_channels[block_id]\n else:\n raise NotImplementedError(\"name must start with up_blocks, mid_blocks, or down_blocks\")\n\n if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):\n lora_attn_processor_class = LoRAAttnAddedKVProcessor\n else:\n lora_attn_processor_class = (\n LoRAAttnProcessor2_0 if hasattr(F, \"scaled_dot_product_attention\") else LoRAAttnProcessor\n )\n unet_lora_attn_procs[name] = lora_attn_processor_class(\n hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=lora_rank\n )\n unet.set_attn_processor(unet_lora_attn_procs)\n unet_lora_layers = AttnProcsLayers(unet.attn_processors)\n\n # Optimizer creation\n params_to_optimize = (unet_lora_layers.parameters())\n optimizer = torch.optim.AdamW(\n params_to_optimize,\n lr=lora_lr,\n betas=(0.9, 0.999),\n weight_decay=1e-2,\n eps=1e-08,\n )\n\n lr_scheduler = get_scheduler(\n \"constant\",\n optimizer=optimizer,\n num_warmup_steps=0,\n num_training_steps=lora_steps,\n num_cycles=1,\n power=1.0,\n )\n\n # prepare accelerator\n unet_lora_layers = accelerator.prepare_model(unet_lora_layers)\n optimizer = accelerator.prepare_optimizer(optimizer)\n lr_scheduler = accelerator.prepare_scheduler(lr_scheduler)\n\n # initialize text embeddings\n with torch.no_grad():\n text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None)\n text_embedding = encode_prompt(\n text_encoder,\n text_inputs.input_ids,\n text_inputs.attention_mask,\n 
text_encoder_use_attention_mask=False\n )\n\n if type(image) == np.ndarray:\n image = Image.fromarray(image)\n \n # initialize latent distribution\n image_transforms = transforms.Compose(\n [\n transforms.Resize(512, interpolation=transforms.InterpolationMode.BILINEAR),\n # transforms.RandomCrop(512),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5]),\n ]\n )\n\n image = image_transforms(image).to(device)\n image = image.unsqueeze(dim=0)\n \n latents_dist = vae.encode(image).latent_dist\n for _ in progress.tqdm(range(lora_steps), desc=\"Training LoRA...\"):\n unet.train()\n model_input = latents_dist.sample() * vae.config.scaling_factor\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(model_input)\n bsz, channels, height, width = model_input.shape\n # Sample a random timestep for each image\n timesteps = torch.randint(\n 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device\n )\n timesteps = timesteps.long()\n\n # Add noise to the model input according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)\n\n # Predict the noise residual\n model_pred = unet(noisy_model_input, timesteps, text_embedding).sample\n\n # Get the target for loss depending on the prediction type\n if noise_scheduler.config.prediction_type == \"epsilon\":\n target = noise\n elif noise_scheduler.config.prediction_type == \"v_prediction\":\n target = noise_scheduler.get_velocity(model_input, noise, timesteps)\n else:\n raise ValueError(f\"Unknown prediction type {noise_scheduler.config.prediction_type}\")\n\n loss = F.mse_loss(model_pred.float(), target.float(), reduction=\"mean\")\n accelerator.backward(loss)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n # save the trained lora\n # unet = unet.to(torch.float32)\n # vae = vae.to(torch.float32)\n # text_encoder = text_encoder.to(torch.float32)\n\n # unwrap_model is used to remove all special modules added when doing distributed training\n # so here, there is no need to call unwrap_model\n # unet_lora_layers = accelerator.unwrap_model(unet_lora_layers)\n LoraLoaderMixin.save_lora_weights(\n save_directory=save_lora_dir,\n unet_lora_layers=unet_lora_layers,\n text_encoder_lora_layers=None,\n weight_name=weight_name,\n safe_serialization=safe_serialization\n )"
},
{
"identifier": "load_lora",
"path": "utils/lora_utils.py",
"snippet": "def load_lora(unet, lora_0, lora_1, alpha):\n lora = {}\n for key in lora_0:\n lora[key] = (1 - alpha) * lora_0[key] + alpha * lora_1[key]\n unet.load_attn_procs(lora)\n return unet"
},
{
"identifier": "AlphaScheduler",
"path": "utils/alpha_scheduler.py",
"snippet": "class AlphaScheduler:\n def __init__(self):\n ...\n\n def from_imgs(self, imgs):\n self.__num_values = len(imgs)\n self.__values = [0]\n for i in range(self.__num_values - 1):\n dis = distance(imgs[i], imgs[i + 1])\n self.__values.append(dis)\n self.__values[i + 1] += self.__values[i]\n for i in range(self.__num_values):\n self.__values[i] /= self.__values[-1]\n\n def save(self, filename):\n torch.save(torch.tensor(self.__values), filename)\n\n def load(self, filename):\n self.__values = torch.load(filename).tolist()\n self.__num_values = len(self.__values)\n\n def get_x(self, y):\n assert y >= 0 and y <= 1\n id = bisect.bisect_left(self.__values, y)\n id -= 1\n if id < 0:\n id = 0\n yl = self.__values[id]\n yr = self.__values[id + 1]\n xl = id * (1 / (self.__num_values - 1))\n xr = (id + 1) * (1 / (self.__num_values - 1))\n x = (y - yl) / (yr - yl) * (xr - xl) + xl\n return x\n\n def get_list(self, len=None):\n if len is None:\n len = self.__num_values\n\n ys = torch.linspace(0, 1, len)\n res = [self.get_x(y) for y in ys]\n return res"
}
] | import os
import torch
import torch.nn.functional as F
import tqdm
import numpy as np
import safetensors
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from PIL import Image
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import StableDiffusionPipeline
from argparse import ArgumentParser
from utils.model_utils import get_img, slerp, do_replace_attn
from utils.lora_utils import train_lora, load_lora
from utils.alpha_scheduler import AlphaScheduler | 5,856 | unconditional_input.input_ids.to(DEVICE))[0]
text_embeddings = torch.cat(
[unconditional_embeddings, text_embeddings], dim=0)
print("latents shape: ", latents.shape)
        # iterative sampling
self.scheduler.set_timesteps(num_inference_steps)
print("Valid timesteps: ", reversed(self.scheduler.timesteps))
# print("attributes: ", self.scheduler.__dict__)
latents_list = [latents]
pred_x0_list = [latents]
for i, t in enumerate(tqdm.tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
continue
if guidance_scale > 1.:
model_inputs = torch.cat([latents] * 2)
else:
model_inputs = latents
# predict the noise
noise_pred = self.unet(
model_inputs, t, encoder_hidden_states=text_embeddings).sample
if guidance_scale > 1.:
noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)
noise_pred = noise_pred_uncon + guidance_scale * \
(noise_pred_con - noise_pred_uncon)
# compute the previous noise sample x_t-1 -> x_t
latents, pred_x0 = self.inv_step(noise_pred, t, latents)
latents_list.append(latents)
pred_x0_list.append(pred_x0)
return latents
@torch.no_grad()
def ddim_inversion(self, latent, cond):
timesteps = reversed(self.scheduler.timesteps)
with torch.autocast(device_type='cuda', dtype=torch.float32):
for i, t in enumerate(tqdm.tqdm(timesteps, desc="DDIM inversion")):
cond_batch = cond.repeat(latent.shape[0], 1, 1)
alpha_prod_t = self.scheduler.alphas_cumprod[t]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[timesteps[i - 1]]
if i > 0 else self.scheduler.final_alpha_cumprod
)
mu = alpha_prod_t ** 0.5
mu_prev = alpha_prod_t_prev ** 0.5
sigma = (1 - alpha_prod_t) ** 0.5
sigma_prev = (1 - alpha_prod_t_prev) ** 0.5
eps = self.unet(
latent, t, encoder_hidden_states=cond_batch).sample
pred_x0 = (latent - sigma_prev * eps) / mu_prev
latent = mu * pred_x0 + sigma * eps
# if save_latents:
# torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))
# torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))
return latent
def step(
self,
model_output: torch.FloatTensor,
timestep: int,
x: torch.FloatTensor,
):
"""
        predict the sample of the next step in the denoising process.
"""
prev_timestep = timestep - \
self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
alpha_prod_t_prev = self.scheduler.alphas_cumprod[
prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output
x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir
return x_prev, pred_x0
@torch.no_grad()
def image2latent(self, image):
DEVICE = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
        if isinstance(image, Image.Image):  # was `type(image) is Image`, which never matches a PIL image
image = np.array(image)
image = torch.from_numpy(image).float() / 127.5 - 1
image = image.permute(2, 0, 1).unsqueeze(0)
# input image density range [-1, 1]
latents = self.vae.encode(image.to(DEVICE))['latent_dist'].mean
latents = latents * 0.18215
return latents
@torch.no_grad()
def latent2image(self, latents, return_type='np'):
latents = 1 / 0.18215 * latents.detach()
image = self.vae.decode(latents)['sample']
if return_type == 'np':
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
image = (image * 255).astype(np.uint8)
elif return_type == "pt":
image = (image / 2 + 0.5).clamp(0, 1)
return image
def latent2image_grad(self, latents):
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents)['sample']
return image # range [-1, 1]
@torch.no_grad()
def cal_latent(self, num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha, use_lora, fix_lora=None):
# latents = torch.cos(alpha * torch.pi / 2) * img_noise_0 + \
# torch.sin(alpha * torch.pi / 2) * img_noise_1
# latents = (1 - alpha) * img_noise_0 + alpha * img_noise_1
# latents = latents / ((1 - alpha) ** 2 + alpha ** 2)
|
class StoreProcessor():
def __init__(self, original_processor, value_dict, name):
self.original_processor = original_processor
self.value_dict = value_dict
self.name = name
self.value_dict[self.name] = dict()
self.id = 0
def __call__(self, attn, hidden_states, *args, encoder_hidden_states=None, attention_mask=None, **kwargs):
# Is self attention
if encoder_hidden_states is None:
self.value_dict[self.name][self.id] = hidden_states.detach()
self.id += 1
res = self.original_processor(attn, hidden_states, *args,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
**kwargs)
return res
class LoadProcessor():
def __init__(self, original_processor, name, img0_dict, img1_dict, alpha, beta=0, lamd=0.6):
super().__init__()
self.original_processor = original_processor
self.name = name
self.img0_dict = img0_dict
self.img1_dict = img1_dict
self.alpha = alpha
self.beta = beta
self.lamd = lamd
self.id = 0
def parent_call(
self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0
):
residual = hidden_states
if attn.spatial_norm is not None:
hidden_states = attn.spatial_norm(hidden_states)
input_ndim = hidden_states.ndim
if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(
batch_size, channel, height * width).transpose(1, 2)
batch_size, sequence_length, _ = (
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
)
attention_mask = attn.prepare_attention_mask(
attention_mask, sequence_length, batch_size)
if attn.group_norm is not None:
hidden_states = attn.group_norm(
hidden_states.transpose(1, 2)).transpose(1, 2)
query = attn.to_q(hidden_states) + scale * \
self.original_processor.to_q_lora(hidden_states)
query = attn.head_to_batch_dim(query)
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
elif attn.norm_cross:
encoder_hidden_states = attn.norm_encoder_hidden_states(
encoder_hidden_states)
key = attn.to_k(encoder_hidden_states) + scale * \
self.original_processor.to_k_lora(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states) + scale * \
self.original_processor.to_v_lora(encoder_hidden_states)
key = attn.head_to_batch_dim(key)
value = attn.head_to_batch_dim(value)
attention_probs = attn.get_attention_scores(
query, key, attention_mask)
hidden_states = torch.bmm(attention_probs, value)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](
hidden_states) + scale * self.original_processor.to_out_lora(hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
if input_ndim == 4:
hidden_states = hidden_states.transpose(
-1, -2).reshape(batch_size, channel, height, width)
if attn.residual_connection:
hidden_states = hidden_states + residual
hidden_states = hidden_states / attn.rescale_output_factor
return hidden_states
def __call__(self, attn, hidden_states, *args, encoder_hidden_states=None, attention_mask=None, **kwargs):
# Is self attention
if encoder_hidden_states is None:
# hardcode timestep
if self.id < 50 * self.lamd:
map0 = self.img0_dict[self.name][self.id]
map1 = self.img1_dict[self.name][self.id]
cross_map = self.beta * hidden_states + \
(1 - self.beta) * ((1 - self.alpha) * map0 + self.alpha * map1)
# cross_map = self.beta * hidden_states + \
# (1 - self.beta) * slerp(map0, map1, self.alpha)
# cross_map = slerp(slerp(map0, map1, self.alpha),
# hidden_states, self.beta)
# cross_map = hidden_states
# cross_map = torch.cat(
# ((1 - self.alpha) * map0, self.alpha * map1), dim=1)
# res = self.original_processor(attn, hidden_states, *args,
# encoder_hidden_states=cross_map,
# attention_mask=attention_mask,
# temb=temb, **kwargs)
res = self.parent_call(attn, hidden_states, *args,
encoder_hidden_states=cross_map,
attention_mask=attention_mask,
**kwargs)
else:
res = self.original_processor(attn, hidden_states, *args,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
**kwargs)
self.id += 1
# if self.id == len(self.img0_dict[self.name]):
if self.id == len(self.img0_dict[self.name]):
self.id = 0
else:
res = self.original_processor(attn, hidden_states, *args,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
**kwargs)
return res
class DiffMorpherPipeline(StableDiffusionPipeline):
def __init__(self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
):
super().__init__(vae, text_encoder, tokenizer, unet, scheduler,
safety_checker, feature_extractor, requires_safety_checker)
self.img0_dict = dict()
self.img1_dict = dict()
def inv_step(
self,
model_output: torch.FloatTensor,
timestep: int,
x: torch.FloatTensor,
eta=0.,
verbose=False
):
"""
Inverse sampling for DDIM Inversion
"""
if verbose:
print("timestep: ", timestep)
next_step = timestep
timestep = min(timestep - self.scheduler.config.num_train_timesteps //
self.scheduler.num_inference_steps, 999)
alpha_prod_t = self.scheduler.alphas_cumprod[
timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]
beta_prod_t = 1 - alpha_prod_t
pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output
x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir
return x_next, pred_x0
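    # The update above is the DDIM step run in reverse: with a_t = alphas_cumprod[t],
    #     pred_x0  = (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t)
    #     x_{t+1}  = sqrt(a_{t+1}) * pred_x0 + sqrt(1 - a_{t+1}) * eps
    # so a clean latent is pushed toward noise; `step` below applies the same
    # formula in the denoising direction to reconstruct the image.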
@torch.no_grad()
def invert(
self,
image: torch.Tensor,
prompt,
num_inference_steps=50,
num_actual_inference_steps=None,
guidance_scale=1.,
eta=0.0,
**kwds):
"""
        invert a real image into a noise map with deterministic DDIM inversion
"""
DEVICE = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
batch_size = image.shape[0]
if isinstance(prompt, list):
if batch_size == 1:
image = image.expand(len(prompt), -1, -1, -1)
elif isinstance(prompt, str):
if batch_size > 1:
prompt = [prompt] * batch_size
# text embeddings
text_input = self.tokenizer(
prompt,
padding="max_length",
max_length=77,
return_tensors="pt"
)
text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]
print("input text embeddings :", text_embeddings.shape)
# define initial latents
latents = self.image2latent(image)
# unconditional embedding for classifier free guidance
if guidance_scale > 1.:
max_length = text_input.input_ids.shape[-1]
unconditional_input = self.tokenizer(
[""] * batch_size,
padding="max_length",
max_length=77,
return_tensors="pt"
)
unconditional_embeddings = self.text_encoder(
unconditional_input.input_ids.to(DEVICE))[0]
text_embeddings = torch.cat(
[unconditional_embeddings, text_embeddings], dim=0)
print("latents shape: ", latents.shape)
        # iterative sampling
self.scheduler.set_timesteps(num_inference_steps)
print("Valid timesteps: ", reversed(self.scheduler.timesteps))
# print("attributes: ", self.scheduler.__dict__)
latents_list = [latents]
pred_x0_list = [latents]
for i, t in enumerate(tqdm.tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
continue
if guidance_scale > 1.:
model_inputs = torch.cat([latents] * 2)
else:
model_inputs = latents
# predict the noise
noise_pred = self.unet(
model_inputs, t, encoder_hidden_states=text_embeddings).sample
if guidance_scale > 1.:
noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)
noise_pred = noise_pred_uncon + guidance_scale * \
(noise_pred_con - noise_pred_uncon)
# compute the previous noise sample x_t-1 -> x_t
latents, pred_x0 = self.inv_step(noise_pred, t, latents)
latents_list.append(latents)
pred_x0_list.append(pred_x0)
return latents
@torch.no_grad()
def ddim_inversion(self, latent, cond):
timesteps = reversed(self.scheduler.timesteps)
with torch.autocast(device_type='cuda', dtype=torch.float32):
for i, t in enumerate(tqdm.tqdm(timesteps, desc="DDIM inversion")):
cond_batch = cond.repeat(latent.shape[0], 1, 1)
alpha_prod_t = self.scheduler.alphas_cumprod[t]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[timesteps[i - 1]]
if i > 0 else self.scheduler.final_alpha_cumprod
)
mu = alpha_prod_t ** 0.5
mu_prev = alpha_prod_t_prev ** 0.5
sigma = (1 - alpha_prod_t) ** 0.5
sigma_prev = (1 - alpha_prod_t_prev) ** 0.5
eps = self.unet(
latent, t, encoder_hidden_states=cond_batch).sample
pred_x0 = (latent - sigma_prev * eps) / mu_prev
latent = mu * pred_x0 + sigma * eps
# if save_latents:
# torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))
# torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))
return latent
def step(
self,
model_output: torch.FloatTensor,
timestep: int,
x: torch.FloatTensor,
):
"""
        predict the sample of the next step in the denoising process.
"""
prev_timestep = timestep - \
self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
alpha_prod_t_prev = self.scheduler.alphas_cumprod[
prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output
x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir
return x_prev, pred_x0
@torch.no_grad()
def image2latent(self, image):
DEVICE = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
        if isinstance(image, Image.Image):  # was `type(image) is Image`, which never matches a PIL image
image = np.array(image)
image = torch.from_numpy(image).float() / 127.5 - 1
image = image.permute(2, 0, 1).unsqueeze(0)
# input image density range [-1, 1]
latents = self.vae.encode(image.to(DEVICE))['latent_dist'].mean
latents = latents * 0.18215
return latents
@torch.no_grad()
def latent2image(self, latents, return_type='np'):
latents = 1 / 0.18215 * latents.detach()
image = self.vae.decode(latents)['sample']
if return_type == 'np':
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
image = (image * 255).astype(np.uint8)
elif return_type == "pt":
image = (image / 2 + 0.5).clamp(0, 1)
return image
def latent2image_grad(self, latents):
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents)['sample']
return image # range [-1, 1]
@torch.no_grad()
def cal_latent(self, num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha, use_lora, fix_lora=None):
# latents = torch.cos(alpha * torch.pi / 2) * img_noise_0 + \
# torch.sin(alpha * torch.pi / 2) * img_noise_1
# latents = (1 - alpha) * img_noise_0 + alpha * img_noise_1
# latents = latents / ((1 - alpha) ** 2 + alpha ** 2) | latents = slerp(img_noise_0, img_noise_1, alpha, self.use_adain) | 1 | 2023-12-11 15:19:07+00:00 | 8k |
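Taken together, the record above sketches DiffMorpher's interpolation loop: both endpoint images are DDIM-inverted to noise, the two per-image LoRAs are blended, and latents and text conditions are interpolated for each alpha. The loop below is a hypothetical driver, not code from this file; `pipe` (a DiffMorpherPipeline), the inverted latents, text embeddings and LoRA state dicts are assumed to already exist:

import torch

alphas = torch.linspace(0, 1, 16).tolist()   # or an AlphaScheduler().get_list() schedule
pipe.scheduler.set_timesteps(50)
frames = []
for alpha in alphas:
    # (1 - alpha) * lora_0 + alpha * lora_1, loaded into the UNet attention processors
    pipe.unet = load_lora(pipe.unet, lora_0, lora_1, alpha)
    # spherical interpolation between the two DDIM-inverted noise latents
    latents = slerp(img_noise_0, img_noise_1, alpha)
    # linear interpolation of the text conditions
    text_emb = (1 - alpha) * text_embeddings_0 + alpha * text_embeddings_1
    for t in pipe.scheduler.timesteps:        # plain DDIM denoising using `step` above
        noise_pred = pipe.unet(latents, t, encoder_hidden_states=text_emb).sample
        latents, _ = pipe.step(noise_pred, t, latents)
    frames.append(pipe.latent2image(latents))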
modelscope/richdreamer | threestudio/models/renderers/nerf_volume_renderer.py | [
{
"identifier": "BaseBackground",
"path": "threestudio/models/background/base.py",
"snippet": "class BaseBackground(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n def configure(self):\n pass\n\n def forward(self, dirs: Float[Tensor, \"B H W 3\"]) -> Float[Tensor, \"B H W Nc\"]:\n raise NotImplementedError"
},
{
"identifier": "ImportanceEstimator",
"path": "threestudio/models/estimators.py",
"snippet": "class ImportanceEstimator(AbstractEstimator):\n def __init__(\n self,\n ) -> None:\n super().__init__()\n\n @torch.no_grad()\n def sampling(\n self,\n prop_sigma_fns: List[Callable],\n prop_samples: List[int],\n num_samples: int,\n # rendering options\n n_rays: int,\n near_plane: float,\n far_plane: float,\n sampling_type: Literal[\"uniform\", \"lindisp\"] = \"uniform\",\n # training options\n stratified: bool = False,\n requires_grad: bool = False,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Sampling with CDFs from proposal networks.\n\n Args:\n prop_sigma_fns: Proposal network evaluate functions. It should be a list\n of functions that take in samples {t_starts (n_rays, n_samples),\n t_ends (n_rays, n_samples)} and returns the post-activation densities\n (n_rays, n_samples).\n prop_samples: Number of samples to draw from each proposal network. Should\n be the same length as `prop_sigma_fns`.\n num_samples: Number of samples to draw in the end.\n n_rays: Number of rays.\n near_plane: Near plane.\n far_plane: Far plane.\n sampling_type: Sampling type. Either \"uniform\" or \"lindisp\". Default to\n \"lindisp\".\n stratified: Whether to use stratified sampling. Default to `False`.\n\n Returns:\n A tuple of {Tensor, Tensor}:\n\n - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n - **t_ends**: The ends of the samples. Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for level_fn, level_samples in zip(prop_sigma_fns, prop_samples):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(t_starts, t_ends, sigmas)\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[:, :1])], dim=-1)\n\n intervals, _ = importance_sampling(intervals, cdfs, num_samples, stratified)\n t_vals_fine = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n\n t_vals = torch.cat([t_vals, t_vals_fine], dim=-1)\n t_vals, _ = torch.sort(t_vals, dim=-1)\n\n t_starts_ = t_vals[..., :-1]\n t_ends_ = t_vals[..., 1:]\n\n return t_starts_, t_ends_"
},
{
"identifier": "BaseImplicitGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseImplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n isosurface: bool = True\n isosurface_method: str = \"mt\"\n isosurface_resolution: int = 128\n isosurface_threshold: Union[float, str] = 0.0\n isosurface_chunk: int = 0\n isosurface_coarse_to_fine: bool = True\n isosurface_deformable_grid: bool = False\n isosurface_remove_outliers: bool = True\n isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )\n self.isosurface_helper: Optional[IsosurfaceHelper] = None\n self.unbounded: bool = False\n\n def _initilize_isosurface_helper(self):\n if self.cfg.isosurface and self.isosurface_helper is None:\n if self.cfg.isosurface_method == \"mc-cpu\":\n self.isosurface_helper = MarchingCubeCPUHelper(\n self.cfg.isosurface_resolution\n ).to(self.device)\n elif self.cfg.isosurface_method == \"mt\":\n self.isosurface_helper = MarchingTetrahedraHelper(\n self.cfg.isosurface_resolution,\n f\"load/tets/{self.cfg.isosurface_resolution}_tets.npz\",\n ).to(self.device)\n else:\n raise AttributeError(\n \"Unknown isosurface method {self.cfg.isosurface_method}\"\n )\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n raise NotImplementedError\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n # return the value of the implicit field, could be density / signed distance\n # also return a deformation field if the grid vertices can be optimized\n raise NotImplementedError\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n # return the value of the implicit field, where the zero level set represents the surface\n raise NotImplementedError\n\n def _isosurface(self, bbox: Float[Tensor, \"2 3\"], fine_stage: bool = False) -> Mesh:\n def batch_func(x):\n # scale to bbox as the input vertices are in [0, 1]\n field, deformation = self.forward_field(\n scale_tensor(\n x.to(bbox.device), self.isosurface_helper.points_range, bbox\n ),\n )\n field = field.to(\n x.device\n ) # move to the same device as the input (could be CPU)\n if deformation is not None:\n deformation = deformation.to(x.device)\n return field, deformation\n\n assert self.isosurface_helper is not None\n\n field, deformation = chunk_batch(\n batch_func,\n self.cfg.isosurface_chunk,\n self.isosurface_helper.grid_vertices,\n )\n\n threshold: float\n\n if isinstance(self.cfg.isosurface_threshold, float):\n threshold = self.cfg.isosurface_threshold\n elif self.cfg.isosurface_threshold == \"auto\":\n eps = 1.0e-5\n threshold = field[field > eps].mean().item()\n threestudio.info(\n f\"Automatically determined isosurface threshold: {threshold}\"\n )\n else:\n raise TypeError(\n f\"Unknown isosurface_threshold {self.cfg.isosurface_threshold}\"\n )\n\n level = self.forward_level(field, threshold)\n mesh: Mesh = self.isosurface_helper(level, deformation=deformation)\n mesh.v_pos = scale_tensor(\n mesh.v_pos, self.isosurface_helper.points_range, bbox\n ) # scale to bbox as the grid vertices are in [0, 1]\n mesh.add_extra(\"bbox\", bbox)\n\n if 
self.cfg.isosurface_remove_outliers:\n # remove outliers components with small number of faces\n # only enabled when the mesh is not differentiable\n mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)\n\n return mesh\n\n def isosurface(self) -> Mesh:\n if not self.cfg.isosurface:\n raise NotImplementedError(\n \"Isosurface is not enabled in the current configuration\"\n )\n self._initilize_isosurface_helper()\n if self.cfg.isosurface_coarse_to_fine:\n threestudio.debug(\"First run isosurface to get a tight bounding box ...\")\n with torch.no_grad():\n mesh_coarse = self._isosurface(self.bbox)\n vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)\n vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])\n vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])\n threestudio.debug(\"Run isosurface again with the tight bounding box ...\")\n mesh = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)\n else:\n mesh = self._isosurface(self.bbox)\n return mesh"
},
{
"identifier": "BaseMaterial",
"path": "threestudio/models/materials/base.py",
"snippet": "class BaseMaterial(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n requires_normal: bool = False\n requires_tangent: bool = False\n\n def configure(self):\n pass\n\n def forward(self, *args, **kwargs) -> Float[Tensor, \"*B 3\"]:\n raise NotImplementedError\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}"
},
{
"identifier": "create_network_with_input_encoding",
"path": "threestudio/models/networks.py",
"snippet": "def create_network_with_input_encoding(\n n_input_dims: int, n_output_dims: int, encoding_config, network_config\n) -> nn.Module:\n # input suppose to be range [0, 1]\n network_with_input_encoding: nn.Module\n if encoding_config.otype in [\n \"VanillaFrequency\",\n \"ProgressiveBandHashGrid\",\n ] or network_config.otype in [\"VanillaMLP\", \"SphereInitVanillaMLP\"]:\n encoding = get_encoding(n_input_dims, encoding_config)\n network = get_mlp(encoding.n_output_dims, n_output_dims, network_config)\n network_with_input_encoding = NetworkWithInputEncoding(encoding, network)\n else:\n network_with_input_encoding = TCNNNetworkWithInputEncoding(\n n_input_dims=n_input_dims,\n n_output_dims=n_output_dims,\n encoding_config=config_to_primitive(encoding_config),\n network_config=config_to_primitive(network_config),\n )\n return network_with_input_encoding"
},
{
"identifier": "VolumeRenderer",
"path": "threestudio/models/renderers/base.py",
"snippet": "class VolumeRenderer(Renderer):\n pass"
},
{
"identifier": "parse_optimizer",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim"
},
{
"identifier": "parse_scheduler_to_instance",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_scheduler_to_instance(config, optimizer):\n if config.name == \"ChainedScheduler\":\n schedulers = [\n parse_scheduler_to_instance(conf, optimizer) for conf in config.schedulers\n ]\n scheduler = lr_scheduler.ChainedScheduler(schedulers)\n elif config.name == \"Sequential\":\n schedulers = [\n parse_scheduler_to_instance(conf, optimizer) for conf in config.schedulers\n ]\n scheduler = lr_scheduler.SequentialLR(\n optimizer, schedulers, milestones=config.milestones\n )\n else:\n scheduler = getattr(lr_scheduler, config.name)(optimizer, **config.args)\n return scheduler"
},
{
"identifier": "chunk_batch",
"path": "threestudio/utils/ops.py",
"snippet": "def chunk_batch(func: Callable, chunk_size: int, *args, **kwargs) -> Any:\n if chunk_size <= 0:\n return func(*args, **kwargs)\n B = None\n for arg in list(args) + list(kwargs.values()):\n if isinstance(arg, torch.Tensor):\n B = arg.shape[0]\n break\n assert (\n B is not None\n ), \"No tensor found in args or kwargs, cannot determine batch size.\"\n out = defaultdict(list)\n out_type = None\n # max(1, B) to support B == 0\n for i in range(0, max(1, B), chunk_size):\n out_chunk = func(\n *[\n arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for arg in args\n ],\n **{\n k: arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for k, arg in kwargs.items()\n },\n )\n if out_chunk is None:\n continue\n out_type = type(out_chunk)\n if isinstance(out_chunk, torch.Tensor):\n out_chunk = {0: out_chunk}\n elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):\n chunk_length = len(out_chunk)\n out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}\n elif isinstance(out_chunk, dict):\n pass\n else:\n print(\n f\"Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}.\"\n )\n exit(1)\n for k, v in out_chunk.items():\n v = v if torch.is_grad_enabled() else v.detach()\n out[k].append(v)\n\n if out_type is None:\n return None\n\n out_merged: Dict[Any, Optional[torch.Tensor]] = {}\n for k, v in out.items():\n if all([vv is None for vv in v]):\n # allow None in return value\n out_merged[k] = None\n elif all([isinstance(vv, torch.Tensor) for vv in v]):\n out_merged[k] = torch.cat(v, dim=0)\n else:\n raise TypeError(\n f\"Unsupported types in return value of func: {[type(vv) for vv in v if not isinstance(vv, torch.Tensor)]}\"\n )\n\n if out_type is torch.Tensor:\n return out_merged[0]\n elif out_type in [tuple, list]:\n return out_type([out_merged[i] for i in range(chunk_length)])\n elif out_type is dict:\n return out_merged"
},
{
"identifier": "get_activation",
"path": "threestudio/utils/ops.py",
"snippet": "def get_activation(name) -> Callable:\n if name is None:\n return lambda x: x\n name = name.lower()\n if name == \"none\":\n return lambda x: x\n elif name == \"lin2srgb\":\n return lambda x: torch.where(\n x > 0.0031308,\n torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,\n 12.92 * x,\n ).clamp(0.0, 1.0)\n elif name == \"exp\":\n return lambda x: torch.exp(x)\n elif name == \"shifted_exp\":\n return lambda x: torch.exp(x - 1.0)\n elif name == \"trunc_exp\":\n return trunc_exp\n elif name == \"shifted_trunc_exp\":\n return lambda x: trunc_exp(x - 1.0)\n elif name == \"sigmoid\":\n return lambda x: torch.sigmoid(x)\n elif name == \"tanh\":\n return lambda x: torch.tanh(x)\n elif name == \"shifted_softplus\":\n return lambda x: F.softplus(x - 1.0)\n elif name == \"scale_-11_01\":\n return lambda x: x * 0.5 + 0.5\n else:\n try:\n return getattr(F, name)\n except AttributeError:\n raise ValueError(f\"Unknown activation function: {name}\")"
},
{
"identifier": "validate_empty_rays",
"path": "threestudio/utils/ops.py",
"snippet": "def validate_empty_rays(ray_indices, t_start, t_end):\n if ray_indices.nelement() == 0:\n threestudio.warn(\"Empty rays_indices!\")\n ray_indices = torch.LongTensor([0]).to(ray_indices)\n t_start = torch.Tensor([0]).to(ray_indices)\n t_end = torch.Tensor([0]).to(ray_indices)\n return ray_indices, t_start, t_end"
}
] | import math
import nerfacc
import torch
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from functools import partial
from threestudio.models.background.base import BaseBackground
from threestudio.models.estimators import ImportanceEstimator
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import create_network_with_input_encoding
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.systems.utils import (parse_optimizer,
parse_scheduler_to_instance,)
from threestudio.utils.ops import (chunk_batch, get_activation,
validate_empty_rays,)
from threestudio.utils.typing import * | 4,822 |
@threestudio.register("nerf-volume-renderer")
class NeRFVolumeRenderer(VolumeRenderer):
@dataclass
class Config(VolumeRenderer.Config):
num_samples_per_ray: int = 512
eval_chunk_size: int = 160000
randomized: bool = True
near_plane: float = 0.0
far_plane: float = 1e10
return_comp_normal: bool = False
return_normal_perturb: bool = False
# in ["occgrid", "proposal", "importance"]
estimator: str = "occgrid"
# for occgrid
grid_prune: bool = True
prune_alpha_threshold: bool = True
# for proposal
proposal_network_config: Optional[dict] = None
prop_optimizer_config: Optional[dict] = None
prop_scheduler_config: Optional[dict] = None
num_samples_per_ray_proposal: int = 64
# for importance
num_samples_per_ray_importance: int = 64
occ_grid_res: int = 32
depth_norm_radius: float = 1.0
cfg: Config
def configure(
self,
geometry: BaseImplicitGeometry,
material: BaseMaterial,
background: BaseBackground,
) -> None:
super().configure(geometry, material, background)
if self.cfg.estimator == "occgrid":
self.estimator = nerfacc.OccGridEstimator(
roi_aabb=self.bbox.view(-1), resolution=self.cfg.occ_grid_res, levels=1
)
if not self.cfg.grid_prune:
self.estimator.occs.fill_(True)
self.estimator.binaries.fill_(True)
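            # step size chosen so num_samples_per_ray steps cover the scene cube diagonal (1.732 ~= sqrt(3))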
self.render_step_size = (
1.732 * 2 * self.cfg.radius / self.cfg.num_samples_per_ray
)
self.randomized = self.cfg.randomized
elif self.cfg.estimator == "importance":
|
@threestudio.register("nerf-volume-renderer")
class NeRFVolumeRenderer(VolumeRenderer):
@dataclass
class Config(VolumeRenderer.Config):
num_samples_per_ray: int = 512
eval_chunk_size: int = 160000
randomized: bool = True
near_plane: float = 0.0
far_plane: float = 1e10
return_comp_normal: bool = False
return_normal_perturb: bool = False
# in ["occgrid", "proposal", "importance"]
estimator: str = "occgrid"
# for occgrid
grid_prune: bool = True
prune_alpha_threshold: bool = True
# for proposal
proposal_network_config: Optional[dict] = None
prop_optimizer_config: Optional[dict] = None
prop_scheduler_config: Optional[dict] = None
num_samples_per_ray_proposal: int = 64
# for importance
num_samples_per_ray_importance: int = 64
occ_grid_res: int = 32
depth_norm_radius: float = 1.0
cfg: Config
def configure(
self,
geometry: BaseImplicitGeometry,
material: BaseMaterial,
background: BaseBackground,
) -> None:
super().configure(geometry, material, background)
if self.cfg.estimator == "occgrid":
self.estimator = nerfacc.OccGridEstimator(
roi_aabb=self.bbox.view(-1), resolution=self.cfg.occ_grid_res, levels=1
)
if not self.cfg.grid_prune:
self.estimator.occs.fill_(True)
self.estimator.binaries.fill_(True)
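            # step size chosen so num_samples_per_ray steps cover the scene cube diagonal (1.732 ~= sqrt(3))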
self.render_step_size = (
1.732 * 2 * self.cfg.radius / self.cfg.num_samples_per_ray
)
self.randomized = self.cfg.randomized
elif self.cfg.estimator == "importance": | self.estimator = ImportanceEstimator() | 1 | 2023-12-06 07:53:11+00:00 | 8k |
rehg-lab/RAVE | annotator/oneformer/detectron2/data/datasets/cityscapes.py | [
{
"identifier": "BoxMode",
"path": "annotator/oneformer/detectron2/structures/boxes.py",
"snippet": "class BoxMode(IntEnum):\r\n \"\"\"\r\n Enum of different ways to represent a box.\r\n \"\"\"\r\n\r\n XYXY_ABS = 0\r\n \"\"\"\r\n (x0, y0, x1, y1) in absolute floating points coordinates.\r\n The coordinates in range [0, width or height].\r\n \"\"\"\r\n XYWH_ABS = 1\r\n \"\"\"\r\n (x0, y0, w, h) in absolute floating points coordinates.\r\n \"\"\"\r\n XYXY_REL = 2\r\n \"\"\"\r\n Not yet supported!\r\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\r\n \"\"\"\r\n XYWH_REL = 3\r\n \"\"\"\r\n Not yet supported!\r\n (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.\r\n \"\"\"\r\n XYWHA_ABS = 4\r\n \"\"\"\r\n (xc, yc, w, h, a) in absolute floating points coordinates.\r\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\r\n \"\"\"\r\n\r\n @staticmethod\r\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\r\n \"\"\"\r\n Args:\r\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\r\n from_mode, to_mode (BoxMode)\r\n\r\n Returns:\r\n The converted box of the same type.\r\n \"\"\"\r\n if from_mode == to_mode:\r\n return box\r\n\r\n original_type = type(box)\r\n is_numpy = isinstance(box, np.ndarray)\r\n single_box = isinstance(box, (list, tuple))\r\n if single_box:\r\n assert len(box) == 4 or len(box) == 5, (\r\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\r\n \" where k == 4 or 5\"\r\n )\r\n arr = torch.tensor(box)[None, :]\r\n else:\r\n # avoid modifying the input box\r\n if is_numpy:\r\n arr = torch.from_numpy(np.asarray(box)).clone()\r\n else:\r\n arr = box.clone()\r\n\r\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\r\n BoxMode.XYXY_REL,\r\n BoxMode.XYWH_REL,\r\n ], \"Relative mode not yet supported!\"\r\n\r\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\r\n assert (\r\n arr.shape[-1] == 5\r\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\r\n original_dtype = arr.dtype\r\n arr = arr.double()\r\n\r\n w = arr[:, 2]\r\n h = arr[:, 3]\r\n a = arr[:, 4]\r\n c = torch.abs(torch.cos(a * math.pi / 180.0))\r\n s = torch.abs(torch.sin(a * math.pi / 180.0))\r\n # This basically computes the horizontal bounding rectangle of the rotated box\r\n new_w = c * w + s * h\r\n new_h = c * h + s * w\r\n\r\n # convert center to top-left corner\r\n arr[:, 0] -= new_w / 2.0\r\n arr[:, 1] -= new_h / 2.0\r\n # bottom-right corner\r\n arr[:, 2] = arr[:, 0] + new_w\r\n arr[:, 3] = arr[:, 1] + new_h\r\n\r\n arr = arr[:, :4].to(dtype=original_dtype)\r\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\r\n original_dtype = arr.dtype\r\n arr = arr.double()\r\n arr[:, 0] += arr[:, 2] / 2.0\r\n arr[:, 1] += arr[:, 3] / 2.0\r\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\r\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\r\n else:\r\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\r\n arr[:, 2] += arr[:, 0]\r\n arr[:, 3] += arr[:, 1]\r\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\r\n arr[:, 2] -= arr[:, 0]\r\n arr[:, 3] -= arr[:, 1]\r\n else:\r\n raise NotImplementedError(\r\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\r\n from_mode, to_mode\r\n )\r\n )\r\n\r\n if single_box:\r\n return original_type(arr.flatten().tolist())\r\n if is_numpy:\r\n return arr.numpy()\r\n else:\r\n return arr\r"
},
{
"identifier": "get_world_size",
"path": "annotator/oneformer/detectron2/utils/comm.py",
"snippet": "def get_world_size() -> int:\r\n if not dist.is_available():\r\n return 1\r\n if not dist.is_initialized():\r\n return 1\r\n return dist.get_world_size()\r"
},
{
"identifier": "PathManager",
"path": "annotator/oneformer/detectron2/utils/file_io.py",
"snippet": "class Detectron2Handler(PathHandler):\r\n PREFIX = \"detectron2://\"\r\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\r\n def _get_supported_prefixes(self):\r\n def _get_local_path(self, path, **kwargs):\r\n def _open(self, path, mode=\"r\", **kwargs):\r"
},
{
"identifier": "setup_logger",
"path": "annotator/oneformer/detectron2/utils/logger.py",
"snippet": "@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers\r\ndef setup_logger(\r\n output=None, distributed_rank=0, *, color=True, name=\"detectron2\", abbrev_name=None\r\n):\r\n \"\"\"\r\n Initialize the detectron2 logger and set its verbosity level to \"DEBUG\".\r\n\r\n Args:\r\n output (str): a file name or a directory to save log. If None, will not save log file.\r\n If ends with \".txt\" or \".log\", assumed to be a file name.\r\n Otherwise, logs will be saved to `output/log.txt`.\r\n name (str): the root module name of this logger\r\n abbrev_name (str): an abbreviation of the module, to avoid long names in logs.\r\n Set to \"\" to not log the root module in logs.\r\n By default, will abbreviate \"detectron2\" to \"d2\" and leave other\r\n modules unchanged.\r\n\r\n Returns:\r\n logging.Logger: a logger\r\n \"\"\"\r\n logger = logging.getLogger(name)\r\n logger.setLevel(logging.DEBUG)\r\n logger.propagate = False\r\n\r\n if abbrev_name is None:\r\n abbrev_name = \"d2\" if name == \"detectron2\" else name\r\n\r\n plain_formatter = logging.Formatter(\r\n \"[%(asctime)s] %(name)s %(levelname)s: %(message)s\", datefmt=\"%m/%d %H:%M:%S\"\r\n )\r\n # stdout logging: master only\r\n if distributed_rank == 0:\r\n ch = logging.StreamHandler(stream=sys.stdout)\r\n ch.setLevel(logging.DEBUG)\r\n if color:\r\n formatter = _ColorfulFormatter(\r\n colored(\"[%(asctime)s %(name)s]: \", \"green\") + \"%(message)s\",\r\n datefmt=\"%m/%d %H:%M:%S\",\r\n root_name=name,\r\n abbrev_name=str(abbrev_name),\r\n )\r\n else:\r\n formatter = plain_formatter\r\n ch.setFormatter(formatter)\r\n logger.addHandler(ch)\r\n\r\n # file logging: all workers\r\n if output is not None:\r\n if output.endswith(\".txt\") or output.endswith(\".log\"):\r\n filename = output\r\n else:\r\n filename = os.path.join(output, \"log.txt\")\r\n if distributed_rank > 0:\r\n filename = filename + \".rank{}\".format(distributed_rank)\r\n PathManager.mkdirs(os.path.dirname(filename))\r\n\r\n fh = logging.StreamHandler(_cached_log_stream(filename))\r\n fh.setLevel(logging.DEBUG)\r\n fh.setFormatter(plain_formatter)\r\n logger.addHandler(fh)\r\n\r\n return logger\r"
}
] | import functools
import json
import logging
import multiprocessing as mp
import numpy as np
import os
import annotator.oneformer.pycocotools.mask as mask_util
import cv2 # noqa
import argparse
from itertools import chain
from PIL import Image
from annotator.oneformer.detectron2.structures import BoxMode
from annotator.oneformer.detectron2.utils.comm import get_world_size
from annotator.oneformer.detectron2.utils.file_io import PathManager
from annotator.oneformer.detectron2.utils.logger import setup_logger
from cityscapesscripts.helpers.labels import labels
from cityscapesscripts.helpers.labels import id2label, name2label
from shapely.geometry import MultiPolygon, Polygon
from annotator.oneformer.detectron2.data.catalog import Metadata
from annotator.oneformer.detectron2.utils.visualizer import Visualizer
from cityscapesscripts.helpers.labels import labels
| 4,762 | label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
# Cityscapes's raw annotations uses integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
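            # derive a tight bounding box from the binary instance mask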
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
                # opencv can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
if __name__ == "__main__":
"""
Test the cityscapes dataset loader.
Usage:
python -m detectron2.data.datasets.cityscapes \
cityscapes/leftImg8bit/train cityscapes/gtFine/train
"""
parser = argparse.ArgumentParser()
parser.add_argument("image_dir")
parser.add_argument("gt_dir")
parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
args = parser.parse_args()
| # Copyright (c) Facebook, Inc. and its affiliates.
try:
except ImportError:
# OpenCV is an optional dependency at the moment
pass
logger = logging.getLogger(__name__)
def _get_cityscapes_files(image_dir, gt_dir):
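    # pair each leftImg8bit image with its gtFine instanceIds / labelIds / polygons annotation files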
files = []
# scan through the directory
cities = PathManager.ls(image_dir)
logger.info(f"{len(cities)} cities found in '{image_dir}'.")
for city in cities:
city_img_dir = os.path.join(image_dir, city)
city_gt_dir = os.path.join(gt_dir, city)
for basename in PathManager.ls(city_img_dir):
image_file = os.path.join(city_img_dir, basename)
suffix = "leftImg8bit.png"
assert basename.endswith(suffix), basename
basename = basename[: -len(suffix)]
instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
for f in files[0]:
assert PathManager.isfile(f), f
return files
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = _get_cityscapes_files(image_dir, gt_dir)
logger.info("Preprocessing cityscapes annotations ...")
    # This is still not fast: all workers will execute duplicate work and will
    # take up to 10m on an 8-GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
# Map cityscape ids to contiguous ids
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
# gt_dir is small and contain many small files. make sense to fetch to local first
gt_dir = PathManager.get_local_path(gt_dir)
for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
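        # semantic ground truth uses the trainId encoding; labelTrainIds.png files are generated separately (see assert below)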
label_file = label_file.replace("labelIds", "labelTrainIds")
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(
ret[0]["sem_seg_file_name"]
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
return ret
def _cityscapes_files_to_dict(files, from_json, to_polygons):
"""
Parse cityscapes annotation files to a instance segmentation dataset dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
        # CityscapesScripts draws the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
        # This will resolve the polygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
# Cityscapes's raw annotations uses integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
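            # derive a tight bounding box from the binary instance mask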
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
                # opencv can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
if __name__ == "__main__":
"""
Test the cityscapes dataset loader.
Usage:
python -m detectron2.data.datasets.cityscapes \
cityscapes/leftImg8bit/train cityscapes/gtFine/train
"""
parser = argparse.ArgumentParser()
parser.add_argument("image_dir")
parser.add_argument("gt_dir")
parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
args = parser.parse_args()
| logger = setup_logger(name=__name__)
| 3 | 2023-12-05 02:51:53+00:00 | 8k |
DiffusionLight/DiffusionLight | relighting/pipeline_xl.py | [
{
"identifier": "custom_prepare_latents",
"path": "relighting/pipeline_utils.py",
"snippet": "def custom_prepare_latents(\n self,\n batch_size,\n num_channels_latents,\n height,\n width,\n dtype,\n device,\n generator,\n latents=None,\n image=None,\n timestep=None,\n is_strength_max=True,\n use_noise_moving=True,\n return_noise=False,\n return_image_latents=False,\n newx=0,\n newy=0,\n newr=256,\n current_seed=None,\n ):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if (image is None or timestep is None) and not is_strength_max:\n raise ValueError(\n \"Since strength < 1. initial latents are to be initialised as a combination of Image + Noise.\"\n \"However, either the image or the noise timestep has not been provided.\"\n )\n\n if image.shape[1] == 4:\n image_latents = image.to(device=device, dtype=dtype)\n elif return_image_latents or (latents is None and not is_strength_max):\n image = image.to(device=device, dtype=dtype)\n image_latents = self._encode_vae_image(image=image, generator=generator)\n\n if latents is None and use_noise_moving:\n # random big noise map\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n noise = expand_noise(noise, shape, seed=current_seed, device=device, dtype=dtype)\n \n # ensure noise is the same regardless of inpainting location (top-left corner notation)\n newys = [newy] if not isinstance(newy, list) else newy\n newxs = [newx] if not isinstance(newx, list) else newx\n big_noise = noise.clone()\n prev_noise = None\n for newy, newx in zip(newys, newxs):\n # find patch location within big noise map\n sy = big_noise.shape[2] // 4 + ((512 - 128) - newy) // self.vae_scale_factor\n sx = big_noise.shape[3] // 4 + ((512 - 128) - newx) // self.vae_scale_factor\n\n if prev_noise is not None:\n new_noise = big_noise[:, :, sy:sy+shape[2], sx:sx+shape[3]]\n\n ball_mask = torch.zeros(shape, device=device, dtype=bool)\n top_left = (newy // self.vae_scale_factor, newx // self.vae_scale_factor)\n bottom_right = (top_left[0] + newr // self.vae_scale_factor, top_left[1] + newr // self.vae_scale_factor) # fixed ball size r = 256\n ball_mask[:, :, top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]] = True\n\n noise = prev_noise.clone()\n noise[ball_mask] = new_noise[ball_mask]\n else:\n noise = big_noise[:, :, sy:sy+shape[2], sx:sx+shape[3]]\n\n prev_noise = noise.clone()\n\n # if strength is 1. then initialise the latents to noise, else initial to image + noise\n latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)\n # if pure noise then scale the initial latents by the Scheduler's init sigma\n latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents\n elif latents is None:\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n latents = image_latents.to(device)\n else:\n noise = latents.to(device)\n latents = noise * self.scheduler.init_noise_sigma\n\n outputs = (latents,)\n\n if return_noise:\n outputs += (noise,)\n\n if return_image_latents:\n outputs += (image_latents,)\n\n return outputs"
},
{
"identifier": "custom_prepare_mask_latents",
"path": "relighting/pipeline_utils.py",
"snippet": "def custom_prepare_mask_latents(\n self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance\n):\n # resize the mask to latents shape as we concatenate the mask to the latents\n # we do that before converting to dtype to avoid breaking in case we're using cpu_offload\n # and half precision\n mask = torch.nn.functional.interpolate(\n mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor),\n mode=\"bilinear\", align_corners=False #PURE: We add this to avoid sharp border of the ball\n )\n mask = mask.to(device=device, dtype=dtype)\n\n # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method\n if mask.shape[0] < batch_size:\n if not batch_size % mask.shape[0] == 0:\n raise ValueError(\n \"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to\"\n f\" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number\"\n \" of masks that you pass is divisible by the total requested batch size.\"\n )\n mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)\n\n mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask\n\n masked_image_latents = None\n if masked_image is not None:\n masked_image = masked_image.to(device=device, dtype=dtype)\n masked_image_latents = self._encode_vae_image(masked_image, generator=generator)\n if masked_image_latents.shape[0] < batch_size:\n if not batch_size % masked_image_latents.shape[0] == 0:\n raise ValueError(\n \"The passed images and the required batch size don't match. Images are supposed to be duplicated\"\n f\" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed.\"\n \" Make sure the number of images that you pass is divisible by the total requested batch size.\"\n )\n masked_image_latents = masked_image_latents.repeat(\n batch_size // masked_image_latents.shape[0], 1, 1, 1\n )\n\n masked_image_latents = (\n torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents\n )\n\n # aligning device to prevent device errors when concating it with the latent model input\n masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)\n\n return mask, masked_image_latents"
},
{
"identifier": "rescale_noise_cfg",
"path": "relighting/pipeline_utils.py",
"snippet": "def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):\n \"\"\"\n Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and\n Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4\n \"\"\"\n std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)\n std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)\n # rescale the results from guidance (fixes overexposure)\n noise_pred_rescaled = noise_cfg * (std_text / std_cfg)\n # mix with the original results from guidance by factor guidance_rescale to avoid \"plain looking\" images\n noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg\n return noise_cfg"
}
] | import torch
from typing import List, Union, Dict, Any, Callable, Optional, Tuple
from diffusers.utils.torch_utils import is_compiled_module
from diffusers.models import ControlNetModel
from diffusers.pipelines.controlnet import MultiControlNetModel
from diffusers import StableDiffusionXLControlNetInpaintPipeline
from diffusers.image_processor import PipelineImageInput
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from relighting.pipeline_utils import custom_prepare_latents, custom_prepare_mask_latents, rescale_noise_cfg | 4,892 | crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
dtype=prompt_embeds.dtype,
)
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device)
# 11. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
if (
denoising_end is not None
and denoising_start is not None
and denoising_value_valid(denoising_end)
and denoising_value_valid(denoising_start)
and denoising_start >= denoising_end
):
raise ValueError(
f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
+ f" {denoising_end} when using type float."
)
elif denoising_end is not None and denoising_value_valid(denoising_end):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
# controlnet(s) inference
if guess_mode and do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
control_model_input = latents
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
controlnet_added_cond_kwargs = {
"text_embeds": add_text_embeds.chunk(2)[1],
"time_ids": add_time_ids.chunk(2)[1],
}
else:
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
controlnet_added_cond_kwargs = added_cond_kwargs
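                # scale each ControlNet's conditioning strength by its keep schedule (0 outside its guidance window) at this step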
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
# # Resize control_image to match the size of the input to the controlnet
# if control_image.shape[-2:] != control_model_input.shape[-2:]:
# control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode="bilinear", align_corners=False)
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=control_image,
conditioning_scale=cond_scale,
guess_mode=guess_mode,
added_cond_kwargs=controlnet_added_cond_kwargs,
return_dict=False,
)
if guess_mode and do_classifier_free_guidance:
                    # Inferred ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
if num_channels_unet == 9:
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
print("rescale: ", guidance_rescale)
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
class CustomStableDiffusionXLControlNetInpaintPipeline(StableDiffusionXLControlNetInpaintPipeline):
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: PipelineImageInput = None,
mask_image: PipelineImageInput = None,
control_image: Union[
PipelineImageInput,
List[PipelineImageInput],
] = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 0.9999,
num_inference_steps: int = 50,
denoising_start: Optional[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
guess_mode: bool = False,
control_guidance_start: Union[float, List[float]] = 0.0,
control_guidance_end: Union[float, List[float]] = 1.0,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
newx: int = 0,
newy: int = 0,
newr: int = 256,
current_seed=0,
use_noise_moving=True,
):
# OVERWRITE METHODS
self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)
self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
# align format for control guidance
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
control_guidance_end
]
# # 0.0 Default height and width to unet
# height = height or self.unet.config.sample_size * self.vae_scale_factor
# width = width or self.unet.config.sample_size * self.vae_scale_factor
# 0.1 align format for control guidance
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
control_guidance_end
]
# 1. Check inputs
self.check_inputs(
prompt,
prompt_2,
control_image,
strength,
num_inference_steps,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
controlnet_conditioning_scale,
control_guidance_start,
control_guidance_end,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
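            # broadcast a single conditioning scale to every ControlNet in the MultiControlNetModel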
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. set timesteps
def denoising_value_valid(dnv):
return isinstance(denoising_end, float) and 0 < dnv < 1
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(
num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid else None
)
# check that number of inference steps is not < 1 - as this doesn't make sense
if num_inference_steps < 1:
raise ValueError(
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
)
# at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
# create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
is_strength_max = strength == 1.0
# 5. Preprocess mask and image - resizes image and mask w.r.t height and width
# 5.1 Prepare init image
init_image = self.image_processor.preprocess(image, height=height, width=width)
init_image = init_image.to(dtype=torch.float32)
# 5.2 Prepare control images
if isinstance(controlnet, ControlNetModel):
control_image = self.prepare_control_image(
image=control_image,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
elif isinstance(controlnet, MultiControlNetModel):
control_images = []
for control_image_ in control_image:
control_image_ = self.prepare_control_image(
image=control_image_,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
control_images.append(control_image_)
control_image = control_images
else:
raise ValueError(f"{controlnet.__class__} is not supported.")
# 5.3 Prepare mask
mask = self.mask_processor.preprocess(mask_image, height=height, width=width)
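        # zero out the region to be inpainted (mask >= 0.5), keeping only the known pixels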
masked_image = init_image * (mask < 0.5)
_, _, height, width = init_image.shape
# 6. Prepare latent variables
num_channels_latents = self.vae.config.latent_channels
num_channels_unet = self.unet.config.in_channels
return_image_latents = num_channels_unet == 4
add_noise = True if denoising_start is None else False
latents_outputs = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
image=init_image,
timestep=latent_timestep,
is_strength_max=is_strength_max,
return_noise=True,
return_image_latents=return_image_latents,
newx=newx,
newy=newy,
newr=newr,
current_seed=current_seed,
use_noise_moving=use_noise_moving,
)
if return_image_latents:
latents, noise, image_latents = latents_outputs
else:
latents, noise = latents_outputs
# 7. Prepare mask latent variables
mask, masked_image_latents = self.prepare_mask_latents(
mask,
masked_image,
batch_size * num_images_per_prompt,
height,
width,
prompt_embeds.dtype,
device,
generator,
do_classifier_free_guidance,
)
# 8. Check that sizes of mask, masked image and latents match
if num_channels_unet == 9:
# default case for runwayml/stable-diffusion-inpainting
num_channels_mask = mask.shape[1]
num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
" `pipeline.unet` or your `mask_image` or `image` input."
)
elif num_channels_unet != 4:
raise ValueError(
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
)
# 8.1 Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 8.2 Create tensor stating which controlnets to keep
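# each kept value is 1.0 while the step (as a fraction of the schedule) lies inside that ControlNet's [start, end] guidance window and 0.0 outside it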
controlnet_keep = []
for i in range(len(timesteps)):
keeps = [
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
for s, e in zip(control_guidance_start, control_guidance_end)
]
if isinstance(self.controlnet, MultiControlNetModel):
controlnet_keep.append(keeps)
else:
controlnet_keep.append(keeps[0])
# 9. Recover the output height/width from the latents and set default original/target sizes
height, width = latents.shape[-2:]
height = height * self.vae_scale_factor
width = width * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 10. Prepare added time ids & embeddings
add_text_embeds = pooled_prompt_embeds
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
dtype=prompt_embeds.dtype,
)
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device)
# 11. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
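# leading steps that will not advance the progress bar; only relevant for schedulers whose order is greater than 1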
if (
denoising_end is not None
and denoising_start is not None
and denoising_value_valid(denoising_end)
and denoising_value_valid(denoising_start)
and denoising_start >= denoising_end
):
raise ValueError(
f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
+ f" {denoising_end} when using type float."
)
elif denoising_end is not None and denoising_value_valid(denoising_end):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
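# when denoising_end is a fraction, truncate the schedule so denoising stops once that fraction of the noise schedule has been traversed (commonly used for a base-to-refiner handoff)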
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
# controlnet(s) inference
if guess_mode and do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
control_model_input = latents
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
controlnet_added_cond_kwargs = {
"text_embeds": add_text_embeds.chunk(2)[1],
"time_ids": add_time_ids.chunk(2)[1],
}
else:
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
controlnet_added_cond_kwargs = added_cond_kwargs
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
# # Resize control_image to match the size of the input to the controlnet
# if control_image.shape[-2:] != control_model_input.shape[-2:]:
# control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode="bilinear", align_corners=False)
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=control_image,
conditioning_scale=cond_scale,
guess_mode=guess_mode,
added_cond_kwargs=controlnet_added_cond_kwargs,
return_dict=False,
)
if guess_mode and do_classifier_free_guidance:
# Inferred ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
if num_channels_unet == 9:
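# the inpainting UNet expects latents (4) + mask (1) + masked-image latents (4) = 9 input channels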
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
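# classifier-free guidance: push the prediction away from the unconditional branch toward the text-conditioned one by guidance_scale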
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
print("rescale: ", guidance_rescale)
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf | noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) | 2 | 2023-12-07 14:03:31+00:00 | 8k |
eliphatfs/zerorf | lib/core/ssdnerf_gui.py | [
{
"identifier": "vdb_utils",
"path": "lib/core/utils/vdb_utils.py",
"snippet": "def bit_index(xyz, ratio, level):\ndef write_lenstr(buffer: list, s: str):\ndef getlen(buffer: list):\ndef coo_to_mask(nelem: int, coo: list):\ndef coo_to_dense(nelem: int, coo: list, vals: list):\ndef write_inter_node(buffer: list, node: dict, nelem: int):\ndef dumps(density: numpy.ndarray, sparse_threshold: float = 0.01):\n def nestable_dict():"
},
{
"identifier": "rgetattr",
"path": "lib/core/utils/misc.py",
"snippet": "def rgetattr(obj, attr, *args):\n def _getattr(obj, attr):\n if isinstance(obj, MMDistributedDataParallel):\n obj = obj.module\n return getattr(obj, attr, *args)\n return functools.reduce(_getattr, [obj] + attr.split('.'))"
},
{
"identifier": "rsetattr",
"path": "lib/core/utils/misc.py",
"snippet": "def rsetattr(obj, attr, val):\n pre, _, post = attr.rpartition('.')\n pre = rgetattr(obj, pre) if pre else obj\n if isinstance(pre, MMDistributedDataParallel):\n pre = pre.module\n return setattr(pre, post, val)"
},
{
"identifier": "extract_geometry",
"path": "lib/core/utils/nerf_utils.py",
"snippet": "def extract_geometry(decoder, code_single, resolution=256, threshold=10):\n\n code_single = decoder.preproc(code_single[None]).squeeze(0)\n\n def query_func(pts):\n with torch.no_grad():\n pts = pts.to(code_single.device)[None]\n sigma = decoder.point_density_decode(\n pts,\n code_single[None])[0].flatten()\n out_mask = (pts.squeeze(0) < decoder.aabb[:3]).any(dim=-1) | (pts.squeeze(0) > decoder.aabb[3:]).any(dim=-1)\n sigma.masked_fill_(out_mask, 0)\n return sigma.float()\n\n aabb = decoder.aabb.float()\n vertices, triangles = _extract_geometry(\n aabb[:3] - 0.01, aabb[3:] + 0.01,\n resolution=resolution, threshold=threshold, query_func=query_func)\n return vertices, triangles"
},
{
"identifier": "surround_views",
"path": "lib/core/utils/camera_utils.py",
"snippet": "def surround_views(initial_pose, angle_amp=1.0, num_frames=60):\n rad = torch.from_numpy(\n np.linspace(0, 2 * np.pi, num=num_frames, endpoint=False)).to(initial_pose)\n\n initial_pos = initial_pose[:3, -1]\n initial_pos_dist = torch.linalg.norm(initial_pos)\n initial_pos_norm = initial_pos / initial_pos_dist\n initial_angle = torch.asin(initial_pos_norm[-1])\n\n angles = initial_angle * (rad.sin() * angle_amp + 1)\n pos_xy = F.normalize(initial_pos_norm[:2], dim=0) @ torch.stack(\n [rad.cos(), -rad.sin(),\n rad.sin(), rad.cos()], dim=-1).reshape(-1, 2, 2)\n pos = torch.cat(\n [pos_xy * angles.cos().unsqueeze(-1), angles.sin().unsqueeze(-1)],\n dim=-1) * initial_pos_dist\n rot = look_at(pos, torch.zeros_like(pos), pos.new_tensor([0, 0, 1]).expand(pos.size()))\n poses = torch.cat(\n [torch.cat([rot, pos.unsqueeze(-1)], dim=-1),\n rot.new_tensor([0, 0, 0, 1]).expand(num_frames, 1, -1)], dim=-2)\n\n return poses"
},
{
"identifier": "extract_fields",
"path": "lib/core/utils/nerf_utils.py",
"snippet": "def extract_fields(bound_min, bound_max, resolution, query_func, S=128):\n X = torch.linspace(bound_min[0], bound_max[0], resolution).split(S)\n Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(S)\n Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(S)\n\n u = np.zeros([resolution, resolution, resolution], dtype=np.float32)\n with torch.no_grad():\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n val = query_func(pts).reshape(len(xs), len(ys),\n len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n u[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(zs)] = val\n return u"
},
{
"identifier": "load_pose",
"path": "lib/datasets/shapenet_srn.py",
"snippet": "def load_pose(path):\n pose = np.loadtxt(path, dtype=np.float32, delimiter=' ').reshape(4, 4)\n return torch.from_numpy(pose)"
},
{
"identifier": "load_intrinsics",
"path": "lib/datasets/shapenet_srn.py",
"snippet": "def load_intrinsics(path):\n with open(path, 'r') as file:\n f, cx, cy, _ = map(float, file.readline().split())\n grid_barycenter = list(map(float, file.readline().split()))\n scale = float(file.readline())\n height, width = map(int, file.readline().split())\n fx = fy = f\n return fx, fy, cx, cy, height, width"
}
] | import os
import random
import math
import copy
import json
import numpy as np
import trimesh
import torch
import torch.nn.functional as F
import cv2
import mmcv
import dearpygui.dearpygui as dpg
import matplotlib.pyplot as plotlib
from scipy.spatial.transform import Rotation as R
from mmgen.models.builder import build_module
from mmgen.models.architectures.common import get_module_device
from mmgen.apis import set_random_seed # isort:skip # noqa
from .utils import extract_geometry, surround_views, vdb_utils, rgetattr, rsetattr
from .utils.nerf_utils import extract_fields
from lib.datasets.shapenet_srn import load_pose, load_intrinsics
from videoio import VideoWriter | 5,012 | self.model.test_cfg['extra_scene_step'] = app_data - 1
with dpg.group(horizontal=True):
dpg.add_button(label='load input img', callback=lambda: dpg.show_item('guide_image_dialog'))
dpg.add_slider_float(
label='overlay', min_value=0.0, max_value=1.0, width=170,
default_value=self.overlay_opacity, callback=callback_set_guide_overlay)
dpg.add_text('Guidance params:')
dpg.add_input_float(
label='guidance gain', width=130, default_value=self.guide_gain, callback=callback_set_guide_gain)
dpg.add_input_float(
label='SNR power', width=100,
default_value=self.model_diffusion.test_cfg.get(
'snr_weight_power', self.model_diffusion.timestep_sampler.power),
format='%.3f', callback=callback_set_snr_power)
with dpg.group(horizontal=True):
dpg.add_input_int(
label='langevin steps', width=90, default_value=self.model_diffusion.test_cfg.get('langevin_steps', 0),
min_value=0, max_value=100, min_clamped=True, callback=callback_set_langevin_steps)
dpg.add_input_float(
label='delta', width=100, default_value=self.model_diffusion.test_cfg.get('langevin_delta', 0.4),
format='%.2f', callback=callback_set_langevin_delta)
dpg.add_text('Finetuning optim params:')
dpg.add_input_float(
label='ddpm loss gain', width=130,
default_value=rgetattr(self.model, self.ddpm_loss_key) / self.train_ddpm_weight,
callback=callback_set_ddpm_loss_gain)
dpg.add_input_float(
label='learning rate', width=130, default_value=self.ft_optimizer['lr'], format='%.2e',
callback=callback_set_learning_rate)
with dpg.group(horizontal=True):
dpg.add_input_int(
label='Outer steps', width=90, default_value=self.model.test_cfg.get('n_inverse_steps', 25),
min_value=0, max_value=1000, min_clamped=True, callback=callback_set_outer_loop_steps)
dpg.add_input_int(
label='Inner steps', width=90, default_value=self.model.test_cfg.get('extra_scene_step', 3) + 1,
min_value=1, max_value=100, min_clamped=True, callback=callback_set_inner_loop_steps)
with dpg.collapsing_header(label='Camera options', default_open=True):
def callback_set_cam(sender, app_data):
self.active_cam = getattr(self, app_data + '_cam')
update_camera_status()
self.need_update = True
def callback_reset_camera(sender, app_data):
self.active_cam.fovy = self.default_cam_fovy
self.active_cam.radius = self.default_cam_radius
self.active_cam.set_euler(self.default_cam_euler)
self.active_cam.center = np.array([0, 0, 0], dtype=np.float32)
update_camera_status()
self.need_update = True
with dpg.group(horizontal=True):
dpg.add_combo(
['default', 'guide'], label='camera', width=150,
default_value=self.active_cam.name, callback=callback_set_cam, tag='cam_combo')
dpg.add_button(label='Reset camera', callback=callback_reset_camera)
def callback_set_fovy(sender, app_data):
self.active_cam.fovy = app_data
update_camera_status()
self.need_update = True
def callback_set_cam_r(sender, app_data):
self.active_cam.radius = app_data
update_camera_status()
self.need_update = True
def callback_set_euler(sender, app_data, axis):
euler = self.active_cam.euler
euler[axis] = app_data
self.active_cam.set_euler(euler)
update_camera_status()
self.need_update = True
def callback_set_center(sender, app_data, axis):
self.active_cam.center[axis] = app_data
update_camera_status()
self.need_update = True
dpg.add_slider_float(
label='FoV (vertical)', min_value=1, max_value=120, clamped=True, format='%.1f deg',
default_value=self.active_cam.fovy, callback=callback_set_fovy, tag='fov')
dpg.add_slider_float(
label='radius', min_value=1.0, max_value=5.0, format='%.2f',
default_value=self.active_cam.radius, callback=callback_set_cam_r, tag='radius')
dpg.add_slider_float(
label='azimuth', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[2],
callback=lambda x, y: callback_set_euler(x, y, 2), tag='azimuth')
dpg.add_slider_float(
label='elevation', min_value=-89, max_value=89, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[1],
callback=lambda x, y: callback_set_euler(x, y, 1), tag='elevation')
dpg.add_slider_float(
label='roll', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[0],
callback=lambda x, y: callback_set_euler(x, y, 0), tag='roll')
dpg.add_text('Orbit center:')
with dpg.group(horizontal=True):
dpg.add_input_float(
width=110, format='x: %.2f', tag='center_x',
default_value=self.active_cam.center[0], callback=lambda x, y: callback_set_center(x, y, 0))
dpg.add_input_float(
width=110, format='y: %.2f', tag='center_y',
default_value=self.active_cam.center[1], callback=lambda x, y: callback_set_center(x, y, 1))
dpg.add_input_float(
width=110, format='z: %.2f', tag='center_z',
default_value=self.active_cam.center[2], callback=lambda x, y: callback_set_center(x, y, 2))
def callback_load_intrinsic(sender, app_data):
fx, fy, cx, cy, h, w = load_intrinsics(app_data['file_path_name'])
assert fx == fy and cx == w / 2 and cy == h / 2, 'GUI supports only rectified images'
self.active_cam.fovy = np.rad2deg(2 * np.arctan2(h / 2, fy))
update_camera_status()
self.need_update = True
def callback_load_extrinsic(sender, app_data):
| # modified from torch-ngp
def load_img(path, background=[1., 1., 1.]):
bgra = mmcv.imread(
path, flag='unchanged', channel_order='bgr'
).astype(np.float32) / 255
bgr = bgra[:, :, :3]
rgb = bgr[:, :, ::-1]
if bgra.shape[2] == 4:
alpha = bgra[:, :, 3:4]
rgb = rgb * alpha + np.array(background, dtype=np.float32) * (1 - alpha)
return np.ascontiguousarray(rgb)
class OrbitCamera:
def __init__(self, name, W, H, r=2., fovy=60., euler=[0, 0, 0]):
self.name = name
self.W = W
self.H = H
self.radius = r # camera distance from center
self.fovy = fovy # in degree
self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point
self.default_rot = R.from_quat([0.5, -0.5, 0.5, -0.5])
self.rot = copy.deepcopy(self.default_rot)
self.up = np.array([0, 0, 1], dtype=np.float32) # need to be normalized!
self.set_euler(euler)
# pose
@property
def pose(self):
# first move camera to radius
res = np.eye(4, dtype=np.float32)
res[2, 3] -= self.radius
# rotate
rot = np.eye(4, dtype=np.float32)
rot[:3, :3] = self.rot.as_matrix()
res = rot @ res
# translate
res[:3, 3] -= self.center
return res
def set_pose(self, pose):
self.rot = R.from_matrix(pose[:3, :3])
self.center = -pose[:3, 3] - self.rot.as_matrix()[:3, 2] * self.radius
@property
def intrinsics(self):
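# pinhole intrinsics derived from the vertical FoV, with the principal point at the image center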
focal = self.H / (2 * np.tan(np.radians(self.fovy) / 2))
return np.array([focal, focal, self.W / 2, self.H / 2])
@property
def euler(self):
return (self.rot * self.default_rot.inv()).as_euler('xyz', degrees=True)
def set_euler(self, euler):
self.rot = R.from_euler('xyz', euler, degrees=True) * self.default_rot
def orbit(self, dx, dy):
# rotate along camera up/side axis!
side = self.rot.as_matrix()[:3, 0] # camera side (x) axis of the rotation matrix; already normalized.
rotvec_x = self.up * np.radians(-0.1 * dx)
rotvec_y = side * np.radians(-0.1 * dy)
self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot
def scale(self, delta):
self.radius *= 1.1 ** (-delta)
def pan(self, dx, dy, dz=0):
# pan in camera coordinate system (careful on the sensitivity!)
self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([dx, dy, dz])
def pose2str(self):
with np.printoptions(precision=3, suppress=True):
return str(self.pose)
class SSDNeRFGUI:
default_cam_fovy = 52.0
default_cam_radius = 2.6
default_cam_euler = [0.0, 23.0, -47.4]
def __init__(self, model, W=512, H=512, max_spp=1, debug=True):
self.W = W
self.H = H
self.max_spp = max_spp
self.default_cam = OrbitCamera(
'default', W, H, r=self.default_cam_radius, fovy=self.default_cam_fovy, euler=self.default_cam_euler)
self.guide_cam = OrbitCamera(
'guide', W, H, r=self.default_cam_radius, fovy=self.default_cam_fovy, euler=self.default_cam_euler)
self.active_cam = self.default_cam
self.debug = debug
self.bg_color = torch.ones(3, dtype=torch.float32) # default white bg
self.step = 0 # training step
self.model = model
self.model_decoder = model.decoder_ema if model.decoder_use_ema else model.decoder
self.model_diffusion = model.diffusion_ema if model.diffusion_use_ema else model.diffusion
self.video_sec = 4
self.video_fps = 30
self.video_res = 256
self.render_buffer = np.zeros((self.H, self.W, 3), dtype=np.float32)
self.need_update = True # camera moved, should reset accumulation
self.spp = 1 # sample per pixel
self.dt_gamma_scale = 0.0
self.density_thresh = 0.1
self.mode = 'image' # choose from ['image', 'depth']
self.mesh_resolution = 256
self.mesh_threshold = 10
self.scene_name = 'model_default'
self.sampling_mode = 'text'
self.pos_prompt = ''
self.neg_prompt = ''
self.diffusion_seed = -1
self.diffusion_steps = model.test_cfg.get('num_timesteps', 20)
self.diffusion_sampler = 'DDIM'
self.cfg_scale = 1.0
self.embed_guidance_scale = 0.0
self.clip_denoised = True
dtype = next(self.model_decoder.parameters()).dtype
if self.model.init_code is None:
self.code_buffer = torch.zeros(
self.model.code_size, device=get_module_device(self.model), dtype=dtype)
else:
self.code_buffer = self.model.init_code.clone().to(dtype)
_, self.density_bitfield = self.model.get_density(
self.model_decoder, self.code_buffer[None],
cfg=dict(density_thresh=self.density_thresh, density_step=16))
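# the density bitfield is a coarse occupancy grid the renderer uses to skip empty space during ray marching (torch-ngp-style)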
self.dynamic_resolution = False
self.downscale = 1
self.image_enhancer = build_module(dict(
type='SRVGGNetCompact',
# num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu',
num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu',
# pretrained='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'
pretrained='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
)).half().eval().requires_grad_(False)
if torch.cuda.is_available():
self.image_enhancer.cuda()
self.use_image_enhancer = False
self.guide_image = None
self.guide_image_overlay = None
if 'guidance_gain' in model.test_cfg and 'n_inverse_rays' in model.test_cfg:
self.guide_gain = model.test_cfg['guidance_gain'] / model.test_cfg['n_inverse_rays']
else:
self.guide_gain = 1.0
self.overlay_opacity = 0.3
self.code_viz_range = model.test_cfg.get('clip_range', [-1, 1])
self.ddpm_loss_key = 'diffusion_ema.ddpm_loss.weight_scale' if model.diffusion_use_ema else 'diffusion.ddpm_loss.weight_scale'
self.train_ddpm_weight = model.train_cfg_backup.get(
self.ddpm_loss_key, rgetattr(model, self.ddpm_loss_key))
self.loss_coef = 0.1 # ignore model's test cfg
self.ft_optimizer = model.test_cfg.get(
'optimizer', dict(type='Adam', lr=model.train_cfg['optimizer']['lr'] / 2, weight_decay=0.))
self.ft_lr_scheduler = model.test_cfg.get(
'lr_scheduler', dict(type='ExponentialLR', gamma=0.998))
self.extrinsic_ndc_scale = 2.0 # default shapenet dataset value
dpg.create_context()
if self.debug:
dpg.configure_app(manual_callback_management=True)
self.register_dpg()
self.test_step()
def __del__(self):
dpg.destroy_context()
def prepare_buffer(self, outputs):
if self.mode == 'image':
return outputs['image']
else:
return np.expand_dims(outputs['depth'], -1).repeat(3, -1)
def test_gui(self, pose, intrinsics, W, H, bg_color, spp, dt_gamma_scale, downscale):
with torch.no_grad():
self.model.bg_color = bg_color.to(self.code_buffer.device)
if self.use_image_enhancer and self.mode == 'image':
rH, rW = H // 2, W // 2
intrinsics = intrinsics / 2
else:
rH, rW = H, W
image, depth = self.model.render(
self.model_decoder,
self.code_buffer[None],
self.density_bitfield[None], rH, rW,
self.code_buffer.new_tensor(intrinsics * downscale, dtype=torch.float32)[None, None],
self.code_buffer.new_tensor(pose, dtype=torch.float32)[None, None],
cfg=dict(dt_gamma_scale=dt_gamma_scale))
if self.use_image_enhancer and self.mode == 'image':
image = self.image_enhancer(image[0].half().permute(0, 3, 1, 2))
image = F.interpolate(image, size=(H, W), mode='area').permute(0, 2, 3, 1)[None].float()
results = dict(
image=image[0, 0],
depth=depth[0, 0])
if downscale != 1:
results['image'] = F.interpolate(
results['image'].permute(2, 0, 1)[None], size=(H, W), mode='nearest'
).permute(0, 2, 3, 1).reshape(H, W, 3)
results['depth'] = F.interpolate(results['depth'][None, None], size=(H, W), mode='nearest').reshape(H, W)
if self.overlay_opacity > 0.003 and self.guide_image is not None and self.active_cam.name == 'guide':
results['image'] = self.guide_image_overlay * self.overlay_opacity + results['image'] * (1 - self.overlay_opacity)
results['image'] = results['image'].cpu().numpy()
results['depth'] = results['depth'].cpu().numpy()
return results
def update_params(self):
with torch.no_grad():
self.density_bitfield = self.model.get_density(
self.model_decoder, self.code_buffer[None],
cfg=dict(density_thresh=self.density_thresh, density_step=16))[1].squeeze(0)
def test_step(self):
# TODO: seems we have to move data from GPU --> CPU --> GPU?
if self.need_update or self.spp < self.max_spp:
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
starter.record()
outputs = self.test_gui(
self.active_cam.pose, self.active_cam.intrinsics,
self.W, self.H, self.bg_color, self.spp, self.dt_gamma_scale, self.downscale)
ender.record()
torch.cuda.synchronize()
t = starter.elapsed_time(ender)
# update dynamic resolution
if self.dynamic_resolution:
# max allowed infer time per-frame is 200 ms
full_t = t / (self.downscale ** 2)
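# extrapolate the measured frame time to full resolution (render time scales roughly with pixel count, i.e. with downscale**2)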
downscale = min(1, max(1 / 4, math.sqrt(200 / full_t)))
if downscale > self.downscale * 1.2 or downscale < self.downscale * 0.8:
self.downscale = downscale
if self.need_update:
self.render_buffer = np.ascontiguousarray(self.prepare_buffer(outputs))
self.spp = 1
self.need_update = False
else:
self.render_buffer = (self.render_buffer * self.spp + self.prepare_buffer(outputs)) / (self.spp + 1)
self.spp += 1
dpg.set_value('_log_infer_time', f'{t:.4f}ms ({int(1000 / t)} FPS)')
dpg.set_value('_log_resolution', f'{int(self.downscale * self.W)}x{int(self.downscale * self.H)}')
dpg.set_value('_log_spp', self.spp)
dpg.set_value('_log_scene_name', self.scene_name)
dpg.set_value('_texture', self.render_buffer)
def register_dpg(self):
### register texture
with dpg.texture_registry(show=False):
dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag='_texture')
### register window
# the rendered image, as the primary window
with dpg.window(tag='_primary_window', width=self.W, height=self.H):
# add the texture
dpg.add_image('_texture')
dpg.set_primary_window('_primary_window', True)
def update_camera_status():
if self.debug:
dpg.set_value('_log_pose', self.active_cam.pose2str())
dpg.set_value('fov', self.active_cam.fovy)
dpg.set_value('radius', self.active_cam.radius)
euler = self.active_cam.euler
dpg.set_value('roll', euler[0])
dpg.set_value('elevation', euler[1])
dpg.set_value('azimuth', euler[2])
center = self.active_cam.center
dpg.set_value('center_x', center[0])
dpg.set_value('center_y', center[1])
dpg.set_value('center_z', center[2])
# control window
with dpg.window(label='Control', tag='_control_window', width=380, height=self.H, pos=[self.W, 0]):
# button theme
with dpg.theme() as theme_button:
with dpg.theme_component(dpg.mvButton):
dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
# time
with dpg.group(horizontal=True):
dpg.add_text('Infer time: ')
dpg.add_text('no data', tag='_log_infer_time')
with dpg.group(horizontal=True):
dpg.add_text('SPP: ')
dpg.add_text('1', tag='_log_spp')
with dpg.collapsing_header(label='SSDNeRF', default_open=True):
def callback_diffusion_generate(sender, app_data):
diffusion_seed = random.randint(0, 2**31) if self.diffusion_seed == -1 else self.diffusion_seed
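# -1 means draw a fresh random seed; the chosen seed is embedded in the scene name so it can later be restored via 'Recover seed'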
set_random_seed(diffusion_seed, deterministic=True)
noise = torch.randn((1,) + self.model.code_size)
self.model_diffusion.test_cfg['num_timesteps'] = self.diffusion_steps
self.model_diffusion.sample_method = self.diffusion_sampler
self.model_diffusion.test_cfg['cfg_scale'] = self.cfg_scale
self.model_diffusion.test_cfg['embed_guidance_scale'] = self.embed_guidance_scale
self.model_diffusion.test_cfg['clip_denoised'] = self.clip_denoised
device = get_module_device(self.model)
data = dict(
noise=noise.to(device),
scene_id=[0],
scene_name=['seed_{}'.format(diffusion_seed)],
prompts=[self.pos_prompt],
neg_prompts=[self.neg_prompt])
if self.sampling_mode == 'image_text':
data['extra_cond_img'] = self.extra_cond_image
data['extra_pose_cond'] = torch.tensor(self.guide_cam.pose[:3].reshape(1, 12)).to(device).float()
if self.sampling_mode in ['guide', 'optim']:
scale = max(self.guide_image.shape[1] / self.W, self.guide_image.shape[0] / self.H)
data['cond_imgs'] = self.guide_image[None, None]
data['cond_intrinsics'] = torch.tensor(
self.guide_cam.intrinsics[None, None] * np.array([
scale, scale,
self.guide_image.size(1) / self.W, self.guide_image.size(0) / self.H])
).to(device).float()
data['cond_poses'] = torch.tensor(self.guide_cam.pose[None, None]).to(device).float()
self.model_diffusion.test_cfg['n_inverse_rays'] = self.guide_image.numel()
self.model.test_cfg['loss_coef'] = self.loss_coef / self.guide_image.numel()
if self.sampling_mode == 'guide':
self.model_diffusion.test_cfg['guidance_gain'] = self.guide_gain * self.guide_image.numel()
if self.sampling_mode == 'optim':
self.model.test_cfg['optimizer'] = self.ft_optimizer
self.model.test_cfg['lr_scheduler'] = self.ft_lr_scheduler
optim_kwargs = dict(
code_=self.model.code_activation.inverse(self.code_buffer[None]))
else:
optim_kwargs = dict()
with torch.no_grad():
sample_fun = getattr(self.model, 'val_' + self.sampling_mode)
code, density_grid, density_bitfield = sample_fun(
data, show_pbar=True, **optim_kwargs)
self.code_buffer = code[0].to(self.code_buffer)
self.density_bitfield = density_bitfield[0]
self.scene_name = 'seed_{}'.format(diffusion_seed)
self.need_update = True
print("Peak VRAM usage:", int(torch.cuda.max_memory_allocated() / 1024 ** 2 + 1), "(M)")
def callback_change_mode(sender, app_data):
self.sampling_mode = app_data
def callback_change_sampler(sender, app_data):
self.diffusion_sampler = app_data
with dpg.group(horizontal=True):
dpg.add_combo(
('text', 'image_text', 'uncond', 'guide', 'optim'), label='mode', default_value=self.sampling_mode,
width=75, callback=callback_change_mode)
dpg.add_combo(
self.model_diffusion.available_samplers, label='sampler', default_value=self.diffusion_sampler,
width=190, callback=callback_change_sampler)
def callback_set_pos_prompt(sender, app_data):
self.pos_prompt = app_data
dpg.add_input_text(
label='prompt', width=290, default_value=self.pos_prompt, callback=callback_set_pos_prompt)
def callback_set_neg_prompt(sender, app_data):
self.neg_prompt = app_data
dpg.add_input_text(
label='neg prompt', width=290, default_value=self.neg_prompt, callback=callback_set_neg_prompt)
def callback_set_cfg_scale(sender, app_data):
self.cfg_scale = app_data
dpg.add_input_float(
label='prompt scale', width=100, default_value=self.cfg_scale, callback=callback_set_cfg_scale)
def callback_set_embed_guidance_scale(sender, app_data):
self.embed_guidance_scale = app_data
dpg.add_input_float(
label='embed guidance', width=100, default_value=self.embed_guidance_scale, callback=callback_set_embed_guidance_scale)
def callback_set_diffusion_seed(sender, app_data):
self.diffusion_seed = app_data
def callback_set_diffusion_steps(sender, app_data):
self.diffusion_steps = app_data
def callback_set_clip_denoised(sender, app_data):
self.clip_denoised = app_data
dpg.add_checkbox(label='clip denoised', callback=callback_set_clip_denoised,
default_value=self.clip_denoised)
with dpg.group(horizontal=True):
dpg.add_button(label='Generate', callback=callback_diffusion_generate)
dpg.add_input_int(
label='seed', width=130, min_value=-1, max_value=2**31 - 1, min_clamped=True, max_clamped=True,
default_value=self.diffusion_seed, callback=callback_set_diffusion_seed, tag='seed')
dpg.add_input_int(
label='steps', width=80, min_value=1, max_value=1000, min_clamped=True, max_clamped=True,
default_value=self.diffusion_steps, callback=callback_set_diffusion_steps)
def callback_save_scene(sender, app_data):
path = app_data['file_path_name']
out = dict(
param=dict(
code=self.code_buffer.cpu(),
density_bitfield=self.density_bitfield.cpu()))
torch.save(out, path)
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_save_scene, tag='save_scene_dialog'):
dpg.add_file_extension('.pth')
with dpg.group(horizontal=True):
dpg.add_button(label='Save scene', callback=lambda: dpg.show_item('save_scene_dialog'))
# scene selector
def callback_load_scene(sender, app_data):
self.scene_name = os.path.splitext(app_data['file_name'])[0]
scene = torch.load(app_data['file_path_name'], map_location='cpu')
self.code_buffer = (
scene['param']['code'] if 'code' in scene['param']
else self.model.code_activation(scene['param']['code_'])).to(self.code_buffer)
self.update_params()
print('Loaded scene: ' + self.scene_name)
self.need_update = True
def callback_recover_seed(sender, app_data):
if self.scene_name.startswith('seed_'):
seed = int(self.scene_name[5:])
self.diffusion_seed = seed
dpg.set_value('seed', seed)
print('Recovered seed: ' + str(seed))
else:
print('Failed to recover seed: ' + self.scene_name)
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_load_scene, tag='scene_selector_dialog'):
dpg.add_file_extension('.pth')
with dpg.group(horizontal=True):
dpg.add_button(label='Load scene', callback=lambda: dpg.show_item('scene_selector_dialog'))
dpg.add_text(tag='_log_scene_name')
dpg.add_button(label='Recover seed', callback=callback_recover_seed)
# save geometry
def callback_export_mesh(sender, app_data):
self.export_mesh(app_data['file_path_name'])
def callback_export_vdb(sender, app_data):
self.export_vdb(app_data['file_path_name'])
def callback_save_code(sender, app_data):
dir_path = app_data['file_path_name']
assert os.path.isdir(dir_path), dir_path + ' is not a directory'
self.model_decoder.visualize(
self.code_buffer[None], [self.scene_name], dir_path, code_range=self.code_viz_range)
def callback_set_vmin(sender, app_data):
self.code_viz_range[0] = app_data
def callback_set_vmax(sender, app_data):
self.code_viz_range[1] = app_data
def callback_set_mesh_resolution(sender, app_data):
self.mesh_resolution = app_data
def callback_set_mesh_threshold(sender, app_data):
self.mesh_threshold = app_data
def callback_set_video_resolution(sender, app_data):
self.video_res = app_data
def callback_set_video_sec(sender, app_data):
self.video_sec = app_data
def callback_export_screenshot(sender, app_data):
path = app_data['file_path_name']
cv2.imwrite(path, np.round(self.render_buffer[..., ::-1] * 255).astype(np.uint8))
def callback_export_multi_view(sender, app_data):
dir_path = app_data['file_path_name']
assert os.path.isdir(dir_path), dir_path + ' is not a directory'
self.export_multi_view_data(dir_path)
def callback_export_video(sender, app_data):
path = app_data['file_path_name']
num_frames = int(round(self.video_fps * self.video_sec))
tmp_cam = OrbitCamera(
'tmp', self.video_res, self.video_res,
r=self.default_cam_radius, fovy=self.default_cam_fovy, euler=self.default_cam_euler)
camera_poses = surround_views(
self.code_buffer.new_tensor(tmp_cam.pose, dtype=torch.float32), num_frames=num_frames)
writer = VideoWriter(
path,
resolution=(self.video_res, self.video_res),
lossless=False,
fps=self.video_fps)
bs = 4
device = self.code_buffer.device
with torch.no_grad():
prog = mmcv.ProgressBar(num_frames)
prog.start()
for pose_batch in camera_poses.split(bs, dim=0):
intrinsics = self.code_buffer.new_tensor(
tmp_cam.intrinsics[None], dtype=torch.float32).expand(pose_batch.size(0), -1)[None]
res = self.video_res
if self.use_image_enhancer:
res = res // 2
intrinsics = intrinsics * (res / self.video_res)
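# with the super-resolution enhancer enabled, render at half resolution and let the enhancer upsample back to the target video resolution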
image_batch, depth = self.model.render(
self.model_decoder,
self.code_buffer[None],
self.density_bitfield[None], res, res,
intrinsics,
pose_batch.to(device)[None])
if self.use_image_enhancer:
image_batch = self.image_enhancer(image_batch[0].half().permute(0, 3, 1, 2).clamp(min=0, max=1))
image_batch = F.interpolate(
image_batch, size=(self.video_res, self.video_res), mode='area'
).permute(0, 2, 3, 1)[None]
for image in torch.round(image_batch[0].clamp(min=0, max=1) * 255).to(torch.uint8).cpu().numpy():
writer.write(image)
prog.update(bs)
writer.close()
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_mesh, tag='export_mesh_dialog'):
dpg.add_file_extension('.stl')
dpg.add_file_extension('.dict')
dpg.add_file_extension('.json')
dpg.add_file_extension('.glb')
dpg.add_file_extension('.obj')
dpg.add_file_extension('.gltf')
dpg.add_file_extension('.dict64')
dpg.add_file_extension('.msgpack')
dpg.add_file_extension('.stl_ascii')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_vdb, tag='export_vdb_dialog'):
dpg.add_file_extension('.vdb')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_save_code, tag='save_code_dialog'):
dpg.add_file_extension('.')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_screenshot, tag='export_screenshot_dialog'):
dpg.add_file_extension('.png')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_multi_view, tag='export_multi_view_dialog'):
dpg.add_file_extension('.')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_video, tag='export_video_dialog'):
dpg.add_file_extension('.mp4')
with dpg.group(horizontal=True):
dpg.add_button(label='Export screenshot', callback=lambda: dpg.show_item('export_screenshot_dialog'))
dpg.add_button(label='Export multi-view', callback=lambda: dpg.show_item('export_multi_view_dialog'))
with dpg.group(horizontal=True):
dpg.add_button(label='Export video', callback=lambda: dpg.show_item('export_video_dialog'))
dpg.add_input_int(
label='res', width=90, min_value=4, max_value=1024, min_clamped=True, max_clamped=True,
default_value=self.video_res, callback=callback_set_video_resolution)
dpg.add_input_float(
label='len', width=100, min_value=0, max_value=10, min_clamped=True, max_clamped=True,
default_value=self.video_sec, callback=callback_set_video_sec, format='%.1f sec')
with dpg.group(horizontal=True):
dpg.add_button(label='Export mesh', callback=lambda: dpg.show_item('export_mesh_dialog'))
dpg.add_input_int(
label='res', width=90, min_value=4, max_value=1024, min_clamped=True, max_clamped=True,
default_value=self.mesh_resolution, callback=callback_set_mesh_resolution)
dpg.add_input_float(
label='thr', width=100, min_value=0, max_value=1000, min_clamped=True, max_clamped=True,
format='%.2f', default_value=self.mesh_threshold, callback=callback_set_mesh_threshold)
dpg.add_button(label='Export volume', callback=lambda: dpg.show_item('export_vdb_dialog'))
with dpg.group(horizontal=True):
dpg.add_button(label='Export code viz', callback=lambda: dpg.show_item('save_code_dialog'))
dpg.add_input_float(
label='vmin', width=85, format='%.1f',
default_value=self.code_viz_range[0], callback=callback_set_vmin)
dpg.add_input_float(
label='vmax', width=85, format='%.1f',
default_value=self.code_viz_range[1], callback=callback_set_vmax)
with dpg.collapsing_header(label='Guidance/finetuning options', default_open=False):
def callback_load_guide_image(sender, app_data):
img = load_img(app_data['file_path_name'], [0.5, 0.5, 0.5])
img = (img - 0.5) * 1.2
self.extra_cond_image = torch.tensor(
cv2.resize(img, [384, 384], interpolation=cv2.INTER_LINEAR)
)[None].float().to(self.code_buffer.device)
self.guide_image = torch.tensor(
load_img(app_data['file_path_name'])).float().to(self.code_buffer.device)
bg = self.bg_color.to(self.guide_image.device)[:, None, None]
scale = min(self.W / self.guide_image.shape[1], self.H / self.guide_image.shape[0])
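# letterbox the guide image into the render viewport while keeping its aspect ratio; grid_sample below pads the border with the background color (it samples image - bg with zero padding, then adds bg back)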
grid = F.affine_grid(
torch.tensor(
[[self.W / (self.guide_image.shape[1] * scale), 0, 0],
[0, self.H / (self.guide_image.shape[0] * scale), 0]],
dtype=self.guide_image.dtype, device=self.guide_image.device)[None],
[1, 3, self.H, self.W], align_corners=False)
self.guide_image_overlay = (F.grid_sample(
self.guide_image.permute(2, 0, 1)[None] - bg,
grid, mode='nearest', padding_mode='zeros', align_corners=False,
) + bg).squeeze(0).permute(1, 2, 0)
self.active_cam = self.guide_cam
update_camera_status()
dpg.set_value('cam_combo', 'guide')
self.need_update = True
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_load_guide_image, tag='guide_image_dialog'):
dpg.add_file_extension('.png')
def callback_set_guide_gain(sender, app_data):
self.guide_gain = app_data
def callback_set_guide_overlay(sender, app_data):
self.overlay_opacity = app_data
self.need_update = True
def callback_set_snr_power(sender, app_data):
self.model_diffusion.test_cfg['snr_weight_power'] = app_data
def callback_set_langevin_steps(sender, app_data):
self.model_diffusion.test_cfg['langevin_steps'] = app_data
def callback_set_langevin_delta(sender, app_data):
self.model_diffusion.test_cfg['langevin_delta'] = app_data
def callback_set_ddpm_loss_gain(sender, app_data):
rsetattr(self.model, self.ddpm_loss_key, app_data * self.train_ddpm_weight)
def callback_set_learning_rate(sender, app_data):
self.ft_optimizer['lr'] = app_data
def callback_set_outer_loop_steps(sender, app_data):
self.model.test_cfg['n_inverse_steps'] = app_data
def callback_set_inner_loop_steps(sender, app_data):
self.model.test_cfg['extra_scene_step'] = app_data - 1
with dpg.group(horizontal=True):
dpg.add_button(label='load input img', callback=lambda: dpg.show_item('guide_image_dialog'))
dpg.add_slider_float(
label='overlay', min_value=0.0, max_value=1.0, width=170,
default_value=self.overlay_opacity, callback=callback_set_guide_overlay)
dpg.add_text('Guidance params:')
dpg.add_input_float(
label='guidance gain', width=130, default_value=self.guide_gain, callback=callback_set_guide_gain)
dpg.add_input_float(
label='SNR power', width=100,
default_value=self.model_diffusion.test_cfg.get(
'snr_weight_power', self.model_diffusion.timestep_sampler.power),
format='%.3f', callback=callback_set_snr_power)
with dpg.group(horizontal=True):
dpg.add_input_int(
label='langevin steps', width=90, default_value=self.model_diffusion.test_cfg.get('langevin_steps', 0),
min_value=0, max_value=100, min_clamped=True, callback=callback_set_langevin_steps)
dpg.add_input_float(
label='delta', width=100, default_value=self.model_diffusion.test_cfg.get('langevin_delta', 0.4),
format='%.2f', callback=callback_set_langevin_delta)
dpg.add_text('Finetuning optim params:')
dpg.add_input_float(
label='ddpm loss gain', width=130,
default_value=rgetattr(self.model, self.ddpm_loss_key) / self.train_ddpm_weight,
callback=callback_set_ddpm_loss_gain)
dpg.add_input_float(
label='learning rate', width=130, default_value=self.ft_optimizer['lr'], format='%.2e',
callback=callback_set_learning_rate)
with dpg.group(horizontal=True):
dpg.add_input_int(
label='Outer steps', width=90, default_value=self.model.test_cfg.get('n_inverse_steps', 25),
min_value=0, max_value=1000, min_clamped=True, callback=callback_set_outer_loop_steps)
dpg.add_input_int(
label='Inner steps', width=90, default_value=self.model.test_cfg.get('extra_scene_step', 3) + 1,
min_value=1, max_value=100, min_clamped=True, callback=callback_set_inner_loop_steps)
with dpg.collapsing_header(label='Camera options', default_open=True):
def callback_set_cam(sender, app_data):
self.active_cam = getattr(self, app_data + '_cam')
update_camera_status()
self.need_update = True
def callback_reset_camera(sender, app_data):
self.active_cam.fovy = self.default_cam_fovy
self.active_cam.radius = self.default_cam_radius
self.active_cam.set_euler(self.default_cam_euler)
self.active_cam.center = np.array([0, 0, 0], dtype=np.float32)
update_camera_status()
self.need_update = True
with dpg.group(horizontal=True):
dpg.add_combo(
['default', 'guide'], label='camera', width=150,
default_value=self.active_cam.name, callback=callback_set_cam, tag='cam_combo')
dpg.add_button(label='Reset camera', callback=callback_reset_camera)
def callback_set_fovy(sender, app_data):
self.active_cam.fovy = app_data
update_camera_status()
self.need_update = True
def callback_set_cam_r(sender, app_data):
self.active_cam.radius = app_data
update_camera_status()
self.need_update = True
def callback_set_euler(sender, app_data, axis):
euler = self.active_cam.euler
euler[axis] = app_data
self.active_cam.set_euler(euler)
update_camera_status()
self.need_update = True
def callback_set_center(sender, app_data, axis):
self.active_cam.center[axis] = app_data
update_camera_status()
self.need_update = True
dpg.add_slider_float(
label='FoV (vertical)', min_value=1, max_value=120, clamped=True, format='%.1f deg',
default_value=self.active_cam.fovy, callback=callback_set_fovy, tag='fov')
dpg.add_slider_float(
label='radius', min_value=1.0, max_value=5.0, format='%.2f',
default_value=self.active_cam.radius, callback=callback_set_cam_r, tag='radius')
dpg.add_slider_float(
label='azimuth', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[2],
callback=lambda x, y: callback_set_euler(x, y, 2), tag='azimuth')
dpg.add_slider_float(
label='elevation', min_value=-89, max_value=89, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[1],
callback=lambda x, y: callback_set_euler(x, y, 1), tag='elevation')
dpg.add_slider_float(
label='roll', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[0],
callback=lambda x, y: callback_set_euler(x, y, 0), tag='roll')
dpg.add_text('Orbit center:')
with dpg.group(horizontal=True):
dpg.add_input_float(
width=110, format='x: %.2f', tag='center_x',
default_value=self.active_cam.center[0], callback=lambda x, y: callback_set_center(x, y, 0))
dpg.add_input_float(
width=110, format='y: %.2f', tag='center_y',
default_value=self.active_cam.center[1], callback=lambda x, y: callback_set_center(x, y, 1))
dpg.add_input_float(
width=110, format='z: %.2f', tag='center_z',
default_value=self.active_cam.center[2], callback=lambda x, y: callback_set_center(x, y, 2))
def callback_load_intrinsic(sender, app_data):
fx, fy, cx, cy, h, w = load_intrinsics(app_data['file_path_name'])
assert fx == fy and cx == w / 2 and cy == h / 2, 'GUI supports only rectified images'
self.active_cam.fovy = np.rad2deg(2 * np.arctan2(h / 2, fy))
update_camera_status()
self.need_update = True
def callback_load_extrinsic(sender, app_data): | c2w = load_pose(app_data['file_path_name']) | 6 | 2023-12-14 03:29:28+00:00 | 8k |
geopavlakos/hamer | hamer/datasets/image_dataset.py | [
{
"identifier": "Dataset",
"path": "hamer/datasets/dataset.py",
"snippet": "class Dataset(metaclass=DatasetRegistration):\n \"\"\"\n Base Dataset class\n \"\"\"\n def __init__(self, *args, **kwargs):\n pass"
},
{
"identifier": "get_example",
"path": "hamer/datasets/utils.py",
"snippet": "def get_example(img_path: str|np.ndarray, center_x: float, center_y: float,\n width: float, height: float,\n keypoints_2d: np.array, keypoints_3d: np.array,\n mano_params: Dict, has_mano_params: Dict,\n flip_kp_permutation: List[int],\n patch_width: int, patch_height: int,\n mean: np.array, std: np.array,\n do_augment: bool, is_right: bool, augm_config: CfgNode,\n is_bgr: bool = True,\n use_skimage_antialias: bool = False,\n border_mode: int = cv2.BORDER_CONSTANT,\n return_trans: bool = False) -> Tuple:\n \"\"\"\n Get an example from the dataset and (possibly) apply random augmentations.\n Args:\n img_path (str): Image filename\n center_x (float): Bounding box center x coordinate in the original image.\n center_y (float): Bounding box center y coordinate in the original image.\n width (float): Bounding box width.\n height (float): Bounding box height.\n keypoints_2d (np.array): Array with shape (N,3) containing the 2D keypoints in the original image coordinates.\n keypoints_3d (np.array): Array with shape (N,4) containing the 3D keypoints.\n mano_params (Dict): MANO parameter annotations.\n has_mano_params (Dict): Whether MANO annotations are valid.\n flip_kp_permutation (List): Permutation to apply to the keypoints after flipping.\n patch_width (float): Output box width.\n patch_height (float): Output box height.\n mean (np.array): Array of shape (3,) containing the mean for normalizing the input image.\n std (np.array): Array of shape (3,) containing the std for normalizing the input image.\n do_augment (bool): Whether to apply data augmentation or not.\n aug_config (CfgNode): Config containing augmentation parameters.\n Returns:\n return img_patch, keypoints_2d, keypoints_3d, mano_params, has_mano_params, img_size\n img_patch (np.array): Cropped image patch of shape (3, patch_height, patch_height)\n keypoints_2d (np.array): Array with shape (N,3) containing the transformed 2D keypoints.\n keypoints_3d (np.array): Array with shape (N,4) containing the transformed 3D keypoints.\n mano_params (Dict): Transformed MANO parameters.\n has_mano_params (Dict): Valid flag for transformed MANO parameters.\n img_size (np.array): Image size of the original image.\n \"\"\"\n if isinstance(img_path, str):\n # 1. load image\n cvimg = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n if not isinstance(cvimg, np.ndarray):\n raise IOError(\"Fail to read %s\" % img_path)\n elif isinstance(img_path, np.ndarray):\n cvimg = img_path\n else:\n raise TypeError('img_path must be either a string or a numpy array')\n img_height, img_width, img_channels = cvimg.shape\n\n img_size = np.array([img_height, img_width])\n\n # 2. 
get augmentation params\n if do_augment:\n scale, rot, do_flip, do_extreme_crop, extreme_crop_lvl, color_scale, tx, ty = do_augmentation(augm_config)\n else:\n scale, rot, do_flip, do_extreme_crop, extreme_crop_lvl, color_scale, tx, ty = 1.0, 0, False, False, 0, [1.0, 1.0, 1.0], 0., 0.\n\n # if it's a left hand, we flip\n if not is_right:\n do_flip = True\n\n if width < 1 or height < 1:\n breakpoint()\n\n if do_extreme_crop:\n if extreme_crop_lvl == 0:\n center_x1, center_y1, width1, height1 = extreme_cropping(center_x, center_y, width, height, keypoints_2d)\n elif extreme_crop_lvl == 1:\n center_x1, center_y1, width1, height1 = extreme_cropping_aggressive(center_x, center_y, width, height, keypoints_2d)\n\n THRESH = 4\n if width1 < THRESH or height1 < THRESH:\n # print(f'{do_extreme_crop=}')\n # print(f'width: {width}, height: {height}')\n # print(f'width1: {width1}, height1: {height1}')\n # print(f'center_x: {center_x}, center_y: {center_y}')\n # print(f'center_x1: {center_x1}, center_y1: {center_y1}')\n # print(f'keypoints_2d: {keypoints_2d}')\n # print(f'\\n\\n', flush=True)\n # breakpoint()\n pass\n # print(f'skip ==> width1: {width1}, height1: {height1}, width: {width}, height: {height}')\n else:\n center_x, center_y, width, height = center_x1, center_y1, width1, height1\n\n center_x += width * tx\n center_y += height * ty\n\n # Process 3D keypoints\n keypoints_3d = keypoint_3d_processing(keypoints_3d, flip_kp_permutation, rot, do_flip)\n\n # 3. generate image patch\n if use_skimage_antialias:\n # Blur image to avoid aliasing artifacts\n downsampling_factor = (patch_width / (width*scale))\n if downsampling_factor > 1.1:\n cvimg = gaussian(cvimg, sigma=(downsampling_factor-1)/2, channel_axis=2, preserve_range=True, truncate=3.0)\n\n img_patch_cv, trans = generate_image_patch_cv2(cvimg,\n center_x, center_y,\n width, height,\n patch_width, patch_height,\n do_flip, scale, rot, \n border_mode=border_mode)\n # img_patch_cv, trans = generate_image_patch_skimage(cvimg,\n # center_x, center_y,\n # width, height,\n # patch_width, patch_height,\n # do_flip, scale, rot, \n # border_mode=border_mode)\n\n image = img_patch_cv.copy()\n if is_bgr:\n image = image[:, :, ::-1]\n img_patch_cv = image.copy()\n img_patch = convert_cvimg_to_tensor(image)\n\n\n mano_params, has_mano_params = mano_param_processing(mano_params, has_mano_params, rot, do_flip)\n\n # apply normalization\n for n_c in range(min(img_channels, 3)):\n img_patch[n_c, :, :] = np.clip(img_patch[n_c, :, :] * color_scale[n_c], 0, 255)\n if mean is not None and std is not None:\n img_patch[n_c, :, :] = (img_patch[n_c, :, :] - mean[n_c]) / std[n_c]\n if do_flip:\n keypoints_2d = fliplr_keypoints(keypoints_2d, img_width, flip_kp_permutation)\n\n\n for n_jt in range(len(keypoints_2d)):\n keypoints_2d[n_jt, 0:2] = trans_point2d(keypoints_2d[n_jt, 0:2], trans)\n keypoints_2d[:, :-1] = keypoints_2d[:, :-1] / patch_width - 0.5\n\n if not return_trans:\n return img_patch, keypoints_2d, keypoints_3d, mano_params, has_mano_params, img_size\n else:\n return img_patch, keypoints_2d, keypoints_3d, mano_params, has_mano_params, img_size, trans"
},
{
"identifier": "expand_to_aspect_ratio",
"path": "hamer/datasets/utils.py",
"snippet": "def expand_to_aspect_ratio(input_shape, target_aspect_ratio=None):\n \"\"\"Increase the size of the bounding box to match the target shape.\"\"\"\n if target_aspect_ratio is None:\n return input_shape\n\n try:\n w , h = input_shape\n except (ValueError, TypeError):\n return input_shape\n\n w_t, h_t = target_aspect_ratio\n if h / w < h_t / w_t:\n h_new = max(w * h_t / w_t, h)\n w_new = w\n else:\n h_new = h\n w_new = max(h * w_t / h_t, w)\n if h_new < h or w_new < w:\n breakpoint()\n return np.array([w_new, h_new])"
}
] | import copy
import os
import numpy as np
import torch
import braceexpand
import cv2
import webdataset as wds
from typing import List
from yacs.config import CfgNode
from .dataset import Dataset
from .utils import get_example, expand_to_aspect_ratio | 4,568 | # Load the dataset
if epoch_size is not None:
resampled = True
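# with a fixed epoch_size the shards are resampled with replacement, so the epoch length is set by with_epoch() below rather than by one pass over the tars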
#corrupt_filter = lambda sample: (sample['__key__'] not in CORRUPT_KEYS)
dataset = wds.WebDataset(expand_urls(urls),
nodesplitter=wds.split_by_node,
shardshuffle=True,
resampled=resampled,
cache_dir=cache_dir,
) #.select(corrupt_filter)
if train:
dataset = dataset.shuffle(100)
dataset = dataset.decode('rgb8').rename(jpg='jpg;jpeg;png')
# Process the dataset
dataset = dataset.compose(split_data)
# Filter/clean the dataset
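# all thresholds below default to 0/False, so each cleaning step is a no-op unless enabled in the dataset config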
SUPPRESS_KP_CONF_THRESH = cfg.DATASETS.get('SUPPRESS_KP_CONF_THRESH', 0.0)
SUPPRESS_BETAS_THRESH = cfg.DATASETS.get('SUPPRESS_BETAS_THRESH', 0.0)
SUPPRESS_BAD_POSES = cfg.DATASETS.get('SUPPRESS_BAD_POSES', False)
POSES_BETAS_SIMULTANEOUS = cfg.DATASETS.get('POSES_BETAS_SIMULTANEOUS', False)
BETAS_REG = cfg.DATASETS.get('BETAS_REG', False)
FILTER_NO_POSES = cfg.DATASETS.get('FILTER_NO_POSES', False)
FILTER_NUM_KP = cfg.DATASETS.get('FILTER_NUM_KP', 4)
FILTER_NUM_KP_THRESH = cfg.DATASETS.get('FILTER_NUM_KP_THRESH', 0.0)
FILTER_REPROJ_THRESH = cfg.DATASETS.get('FILTER_REPROJ_THRESH', 0.0)
FILTER_MIN_BBOX_SIZE = cfg.DATASETS.get('FILTER_MIN_BBOX_SIZE', 0.0)
if SUPPRESS_KP_CONF_THRESH > 0:
dataset = dataset.map(lambda x: suppress_bad_kps(x, thresh=SUPPRESS_KP_CONF_THRESH))
if SUPPRESS_BETAS_THRESH > 0:
dataset = dataset.map(lambda x: supress_bad_betas(x, thresh=SUPPRESS_BETAS_THRESH))
if SUPPRESS_BAD_POSES:
dataset = dataset.map(lambda x: supress_bad_poses(x))
if POSES_BETAS_SIMULTANEOUS:
dataset = dataset.map(lambda x: poses_betas_simultaneous(x))
if FILTER_NO_POSES:
dataset = dataset.select(lambda x: filter_no_poses(x))
if FILTER_NUM_KP > 0:
dataset = dataset.select(lambda x: filter_numkp(x, numkp=FILTER_NUM_KP, thresh=FILTER_NUM_KP_THRESH))
if FILTER_REPROJ_THRESH > 0:
dataset = dataset.select(lambda x: filter_reproj_error(x, thresh=FILTER_REPROJ_THRESH))
if FILTER_MIN_BBOX_SIZE > 0:
dataset = dataset.select(lambda x: filter_bbox_size(x, thresh=FILTER_MIN_BBOX_SIZE))
if BETAS_REG:
dataset = dataset.map(lambda x: set_betas_for_reg(x)) # NOTE: Must be at the end
use_skimage_antialias = cfg.DATASETS.get('USE_SKIMAGE_ANTIALIAS', False)
border_mode = {
'constant': cv2.BORDER_CONSTANT,
'replicate': cv2.BORDER_REPLICATE,
}[cfg.DATASETS.get('BORDER_MODE', 'constant')]
# Process the dataset further
dataset = dataset.map(lambda x: ImageDataset.process_webdataset_tar_item(x, train,
augm_config=cfg.DATASETS.CONFIG,
MEAN=MEAN, STD=STD, IMG_SIZE=IMG_SIZE,
BBOX_SHAPE=BBOX_SHAPE,
use_skimage_antialias=use_skimage_antialias,
border_mode=border_mode,
))
if epoch_size is not None:
dataset = dataset.with_epoch(epoch_size)
return dataset
@staticmethod
def process_webdataset_tar_item(item, train,
augm_config=None,
MEAN=DEFAULT_MEAN,
STD=DEFAULT_STD,
IMG_SIZE=DEFAULT_IMG_SIZE,
BBOX_SHAPE=None,
use_skimage_antialias=False,
border_mode=cv2.BORDER_CONSTANT,
):
# Read data from item
key = item['__key__']
image = item['jpg']
data = item['data.pyd']
mask = item['mask']
keypoints_2d = data['keypoints_2d']
keypoints_3d = data['keypoints_3d']
center = data['center']
scale = data['scale']
hand_pose = data['hand_pose']
betas = data['betas']
right = data['right']
has_hand_pose = data['has_hand_pose']
has_betas = data['has_betas']
# image_file = data['image_file']
# Process data
orig_keypoints_2d = keypoints_2d.copy()
center_x = center[0]
center_y = center[1]
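# scale appears to follow the common 200-pixel convention (scale * 200 gives the bbox size in pixels); the box is expanded to the model's aspect ratio and the longer side is used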
bbox_size = expand_to_aspect_ratio(scale*200, target_aspect_ratio=BBOX_SHAPE).max()
if bbox_size < 1:
breakpoint()
mano_params = {'global_orient': hand_pose[:3],
'hand_pose': hand_pose[3:],
'betas': betas
}
has_mano_params = {'global_orient': has_hand_pose,
'hand_pose': has_hand_pose,
'betas': has_betas
}
mano_params_is_axis_angle = {'global_orient': True,
'hand_pose': True,
'betas': False
}
augm_config = copy.deepcopy(augm_config)
# Crop image and (possibly) perform data augmentation
img_rgba = np.concatenate([image, mask.astype(np.uint8)[:,:,None]*255], axis=2)
|
def expand(s):
return os.path.expanduser(os.path.expandvars(s))
def expand_urls(urls: str|List[str]):
if isinstance(urls, str):
urls = [urls]
urls = [u for url in urls for u in braceexpand.braceexpand(expand(url))]
return urls
FLIP_KEYPOINT_PERMUTATION = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
DEFAULT_MEAN = 255. * np.array([0.485, 0.456, 0.406])
DEFAULT_STD = 255. * np.array([0.229, 0.224, 0.225])
DEFAULT_IMG_SIZE = 256
class ImageDataset(Dataset):
@staticmethod
def load_tars_as_webdataset(cfg: CfgNode, urls: str|List[str], train: bool,
resampled=False,
epoch_size=None,
cache_dir=None,
**kwargs) -> Dataset:
"""
Loads the dataset from a webdataset tar file.
"""
IMG_SIZE = cfg.MODEL.IMAGE_SIZE
BBOX_SHAPE = cfg.MODEL.get('BBOX_SHAPE', None)
MEAN = 255. * np.array(cfg.MODEL.IMAGE_MEAN)
STD = 255. * np.array(cfg.MODEL.IMAGE_STD)
def split_data(source):
for item in source:
datas = item['data.pyd']
for data in datas:
if 'detection.npz' in item:
det_idx = data['extra_info']['detection_npz_idx']
mask = item['detection.npz']['masks'][det_idx]
else:
mask = np.ones_like(item['jpg'][:,:,0], dtype=bool)
yield {
'__key__': item['__key__'],
'jpg': item['jpg'],
'data.pyd': data,
'mask': mask,
}
def suppress_bad_kps(item, thresh=0.0):
if thresh > 0:
kp2d = item['data.pyd']['keypoints_2d']
kp2d_conf = np.where(kp2d[:, 2] < thresh, 0.0, kp2d[:, 2])
item['data.pyd']['keypoints_2d'] = np.concatenate([kp2d[:,:2], kp2d_conf[:,None]], axis=1)
return item
def filter_numkp(item, numkp=4, thresh=0.0):
kp_conf = item['data.pyd']['keypoints_2d'][:, 2]
return (kp_conf > thresh).sum() > numkp
def filter_reproj_error(item, thresh=10**4.5):
losses = item['data.pyd'].get('extra_info', {}).get('fitting_loss', np.array({})).item()
reproj_loss = losses.get('reprojection_loss', None)
return reproj_loss is None or reproj_loss < thresh
def filter_bbox_size(item, thresh=1):
bbox_size_min = item['data.pyd']['scale'].min().item() * 200.
return bbox_size_min > thresh
def filter_no_poses(item):
return (item['data.pyd']['has_hand_pose'] > 0)
def supress_bad_betas(item, thresh=3):
has_betas = item['data.pyd']['has_betas']
if thresh > 0 and has_betas:
betas_abs = np.abs(item['data.pyd']['betas'])
if (betas_abs > thresh).any():
item['data.pyd']['has_betas'] = False
return item
def supress_bad_poses(item):
has_hand_pose = item['data.pyd']['has_hand_pose']
if has_hand_pose:
hand_pose = item['data.pyd']['hand_pose']
pose_is_probable = poses_check_probable(torch.from_numpy(hand_pose)[None, 3:], amass_poses_hist100_smooth).item()
if not pose_is_probable:
item['data.pyd']['has_hand_pose'] = False
return item
def poses_betas_simultaneous(item):
# We either have both hand_pose and betas, or neither
has_betas = item['data.pyd']['has_betas']
has_hand_pose = item['data.pyd']['has_hand_pose']
item['data.pyd']['has_betas'] = item['data.pyd']['has_hand_pose'] = np.array(float((has_hand_pose>0) and (has_betas>0)))
return item
def set_betas_for_reg(item):
# Always have betas set to true
has_betas = item['data.pyd']['has_betas']
betas = item['data.pyd']['betas']
if not (has_betas>0):
item['data.pyd']['has_betas'] = np.array(float((True)))
item['data.pyd']['betas'] = betas * 0
return item
# Load the dataset
if epoch_size is not None:
resampled = True
#corrupt_filter = lambda sample: (sample['__key__'] not in CORRUPT_KEYS)
dataset = wds.WebDataset(expand_urls(urls),
nodesplitter=wds.split_by_node,
shardshuffle=True,
resampled=resampled,
cache_dir=cache_dir,
) #.select(corrupt_filter)
if train:
dataset = dataset.shuffle(100)
dataset = dataset.decode('rgb8').rename(jpg='jpg;jpeg;png')
# Process the dataset
dataset = dataset.compose(split_data)
# Filter/clean the dataset
SUPPRESS_KP_CONF_THRESH = cfg.DATASETS.get('SUPPRESS_KP_CONF_THRESH', 0.0)
SUPPRESS_BETAS_THRESH = cfg.DATASETS.get('SUPPRESS_BETAS_THRESH', 0.0)
SUPPRESS_BAD_POSES = cfg.DATASETS.get('SUPPRESS_BAD_POSES', False)
POSES_BETAS_SIMULTANEOUS = cfg.DATASETS.get('POSES_BETAS_SIMULTANEOUS', False)
BETAS_REG = cfg.DATASETS.get('BETAS_REG', False)
FILTER_NO_POSES = cfg.DATASETS.get('FILTER_NO_POSES', False)
FILTER_NUM_KP = cfg.DATASETS.get('FILTER_NUM_KP', 4)
FILTER_NUM_KP_THRESH = cfg.DATASETS.get('FILTER_NUM_KP_THRESH', 0.0)
FILTER_REPROJ_THRESH = cfg.DATASETS.get('FILTER_REPROJ_THRESH', 0.0)
FILTER_MIN_BBOX_SIZE = cfg.DATASETS.get('FILTER_MIN_BBOX_SIZE', 0.0)
if SUPPRESS_KP_CONF_THRESH > 0:
dataset = dataset.map(lambda x: suppress_bad_kps(x, thresh=SUPPRESS_KP_CONF_THRESH))
if SUPPRESS_BETAS_THRESH > 0:
dataset = dataset.map(lambda x: supress_bad_betas(x, thresh=SUPPRESS_BETAS_THRESH))
if SUPPRESS_BAD_POSES:
dataset = dataset.map(lambda x: supress_bad_poses(x))
if POSES_BETAS_SIMULTANEOUS:
dataset = dataset.map(lambda x: poses_betas_simultaneous(x))
if FILTER_NO_POSES:
dataset = dataset.select(lambda x: filter_no_poses(x))
if FILTER_NUM_KP > 0:
dataset = dataset.select(lambda x: filter_numkp(x, numkp=FILTER_NUM_KP, thresh=FILTER_NUM_KP_THRESH))
if FILTER_REPROJ_THRESH > 0:
dataset = dataset.select(lambda x: filter_reproj_error(x, thresh=FILTER_REPROJ_THRESH))
if FILTER_MIN_BBOX_SIZE > 0:
dataset = dataset.select(lambda x: filter_bbox_size(x, thresh=FILTER_MIN_BBOX_SIZE))
if BETAS_REG:
dataset = dataset.map(lambda x: set_betas_for_reg(x)) # NOTE: Must be at the end
use_skimage_antialias = cfg.DATASETS.get('USE_SKIMAGE_ANTIALIAS', False)
border_mode = {
'constant': cv2.BORDER_CONSTANT,
'replicate': cv2.BORDER_REPLICATE,
}[cfg.DATASETS.get('BORDER_MODE', 'constant')]
# Process the dataset further
dataset = dataset.map(lambda x: ImageDataset.process_webdataset_tar_item(x, train,
augm_config=cfg.DATASETS.CONFIG,
MEAN=MEAN, STD=STD, IMG_SIZE=IMG_SIZE,
BBOX_SHAPE=BBOX_SHAPE,
use_skimage_antialias=use_skimage_antialias,
border_mode=border_mode,
))
if epoch_size is not None:
dataset = dataset.with_epoch(epoch_size)
return dataset
@staticmethod
def process_webdataset_tar_item(item, train,
augm_config=None,
MEAN=DEFAULT_MEAN,
STD=DEFAULT_STD,
IMG_SIZE=DEFAULT_IMG_SIZE,
BBOX_SHAPE=None,
use_skimage_antialias=False,
border_mode=cv2.BORDER_CONSTANT,
):
# Read data from item
key = item['__key__']
image = item['jpg']
data = item['data.pyd']
mask = item['mask']
keypoints_2d = data['keypoints_2d']
keypoints_3d = data['keypoints_3d']
center = data['center']
scale = data['scale']
hand_pose = data['hand_pose']
betas = data['betas']
right = data['right']
has_hand_pose = data['has_hand_pose']
has_betas = data['has_betas']
# image_file = data['image_file']
# Process data
orig_keypoints_2d = keypoints_2d.copy()
center_x = center[0]
center_y = center[1]
bbox_size = expand_to_aspect_ratio(scale*200, target_aspect_ratio=BBOX_SHAPE).max()
if bbox_size < 1:
breakpoint()
mano_params = {'global_orient': hand_pose[:3],
'hand_pose': hand_pose[3:],
'betas': betas
}
has_mano_params = {'global_orient': has_hand_pose,
'hand_pose': has_hand_pose,
'betas': has_betas
}
mano_params_is_axis_angle = {'global_orient': True,
'hand_pose': True,
'betas': False
}
augm_config = copy.deepcopy(augm_config)
# Crop image and (possibly) perform data augmentation
img_rgba = np.concatenate([image, mask.astype(np.uint8)[:,:,None]*255], axis=2) | img_patch_rgba, keypoints_2d, keypoints_3d, mano_params, has_mano_params, img_size, trans = get_example(img_rgba, | 1 | 2023-12-08 09:07:07+00:00 | 8k |
baidubce/app-builder | appbuilder/core/components/llms/base.py | [
{
"identifier": "GATEWAY_URL",
"path": "appbuilder/core/constants.py",
"snippet": "GATEWAY_URL = \"https://appbuilder.baidu.com\""
},
{
"identifier": "GATEWAY_INNER_URL",
"path": "appbuilder/core/constants.py",
"snippet": "GATEWAY_INNER_URL = \"http://appbuilder.sdns.baidu.com\""
},
{
"identifier": "Component",
"path": "appbuilder/core/component.py",
"snippet": "class Component:\n r\"\"\"Component基类, 其它实现的Component子类需要继承该基类,并至少实现run方法.\"\"\"\n\n def __init__(self,\n meta: Optional[ComponentArguments] = ComponentArguments(),\n secret_key: Optional[str] = None,\n gateway: str = \"\"\n ):\n r\"\"\"Component初始化方法.\n\n 参数:\n meta (obj: `ComponentArguments`, 可选) : component元信息.\n secret_key(str,可选): 用户鉴权token, 默认从环境变量中获取: os.getenv(\"APPBUILDER_TOKEN\", \"\").\n gateway(str, 可选): 后端网关服务地址,默认从环境变量中获取: os.getenv(\"GATEWAY_URL\", \"\")\n 返回:\n 无\n \"\"\"\n\n self.meta = meta\n self.http_client = HTTPClient(secret_key, gateway)\n\n def __call__(self, *inputs, **kwargs):\n r\"\"\"implement __call__ method\"\"\"\n return self.run(*inputs, **kwargs)\n\n def run(self, *inputs, **kwargs):\n r\"\"\"\n Defines the computation performed at every call.\n Should be overridden by all subclasses.\n\n Parameters:\n *inputs(tuple): unpacked tuple arguments\n **kwargs(dict): unpacked dict arguments\n \"\"\"\n raise NotImplementedError\n\n def batch(self, *args, **kwargs) -> List[Message]:\n r\"\"\"pass\"\"\"\n return None\n\n async def arun(self, *args, **kwargs) -> Optional[Message]:\n r\"\"\"pass\"\"\"\n return None\n\n async def abatch(self, *args, **kwargs) -> List[Message]:\n r\"\"\"pass\"\"\"\n return None\n\n def _trace(self, **data) -> None:\n r\"\"\"pass\"\"\"\n pass\n\n def _debug(self, **data) -> None:\n r\"\"\"pass\"\"\"\n pass"
},
{
"identifier": "Message",
"path": "appbuilder/core/message.py",
"snippet": "class Message(BaseModel, Generic[_T]):\n content: Optional[_T] = {}\n name: Optional[str] = \"msg\"\n mtype: Optional[str] = \"dict\"\n id: Optional[str] = str(uuid.uuid4())\n\n def __init__(self, content: Optional[_T] = None, **data):\n if content is not None:\n data['content'] = content\n super().__init__(**data)\n self.mtype = type(self.content).__name__\n\n def __str__(self):\n return f\"Message(name={self.name}, content={self.content}, mtype={self.mtype})\"\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(name={self.name!r}, content={self.content!r}, mtype={self.mtype!r})\""
},
{
"identifier": "_T",
"path": "appbuilder/core/message.py",
"snippet": "_T = TypeVar(\"_T\")"
},
{
"identifier": "logger",
"path": "appbuilder/utils/logger_util.py",
"snippet": "LOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '[%(asctime)s.%(msecs)03d] %(filename)s [line:%(lineno)d] %(levelname)s [%(logid)s] %(message)s',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout', # Use standard output\n },\n },\n 'loggers': {\n 'appbuilder': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n }\n}\nclass LoggerWithLoggerId(logging.LoggerAdapter):\n def __init__(self, logger, extra, loglevel):\n def set_auto_logid(self):\n def set_logid(self, logid):\n def get_logid(self):\n def level(self):\n def process(self, msg, kwargs):\ndef _setup_logging():"
},
{
"identifier": "ComponentArguments",
"path": "appbuilder/core/component.py",
"snippet": "class ComponentArguments(BaseModel):\n r\"\"\"\"ComponentArguments define Component meta fields\"\"\"\n name: str = \"\"\n tool_desc: Dict[str, Any] = {}\n\n def extract_values_to_dict(self):\n r\"\"\"extract ComponentArguments fields to dict\"\"\"\n\n inputs = {}\n for field_name, field in self.__fields__.items():\n value = getattr(self, field_name)\n # 获取 display_name 元数据\n variable_name = field.field_info.extra.get('variable_name')\n if variable_name:\n # 使用 Enum 成员的实际值\n if isinstance(value, Message):\n inputs[variable_name] = str(value.content)\n elif isinstance(value, Enum):\n inputs[variable_name] = str(value.value)\n else:\n inputs[variable_name] = str(value)\n else:\n inputs[field_name] = value\n return inputs"
},
{
"identifier": "AppBuilderServerException",
"path": "appbuilder/core/_exception.py",
"snippet": "class AppBuilderServerException(BaseRPCException):\n r\"\"\"AppBuilderServerException represent backend server failed response.\n \"\"\"\n\n def __init__(self, request_id=\"\", code=\"\", message=\"\", service_err_code=\"\", service_err_message=\"\"):\n r\"\"\"__init__ a AppBuilderServerException instance.\n :param request_id: str, request unique id.\n :param code: str, backend .\n :rtype:\n \"\"\"\n super().__init__(\"request_id={}, code={}, message={}, service_err_code={}, service_err_message={} \".format(\n request_id, code, message, service_err_code, service_err_message))"
},
{
"identifier": "ModelInfo",
"path": "appbuilder/core/utils.py",
"snippet": "class ModelInfo:\n \"\"\" 模型信息类 \"\"\"\n\n def __init__(self, client: HTTPClient):\n \"\"\"根据模型名称获取并初始化模型信息\"\"\"\n self.client = client\n response = Models(client).list()\n self.model_list = [*response.result.common, *response.result.custom]\n\n def get_model_url(self, model_name: str) -> str:\n \"\"\"获取模型在工作台网关的请求url\"\"\"\n origin_name = model_name\n for key, value in model_name_mapping.items():\n if origin_name == value:\n origin_name = key\n break\n for model in self.model_list:\n if model.name == origin_name:\n return convert_cloudhub_url(self.client, model.url)\n raise ModelNotSupportedException(f\"Model[{model_name}] not available! \"\n f\"You can query available models through: appbuilder.get_model_list()\")\n\n def get_model_type(self, model_name: str) -> str:\n \"\"\"获取模型类型\"\"\"\n origin_name = model_name\n for key, value in model_name_mapping.items():\n if origin_name == value:\n origin_name = key\n break\n for model in self.model_list:\n if model.name == origin_name:\n return model.apiType\n raise ModelNotSupportedException(f\"Model[{model_name}] not available! \"\n f\"You can query available models through: appbuilder.get_model_list()\")"
},
{
"identifier": "SSEClient",
"path": "appbuilder/utils/sse_util.py",
"snippet": "class SSEClient:\n \"\"\"\n 一个简易的SSE Client,用于接收服务端发送的SSE事件。\n \"\"\"\n\n def __init__(self, event_source, char_enc='utf-8'):\n \"\"\"\n 通过现有的事件源初始化 SSE 客户端。\n 事件源应为二进制流,并具有 close() 方法。\n 这通常是实现 io.BinaryIOBase 的东西,比如 httplib 或 urllib3HTTPResponse 对象。\n \"\"\"\n logging.info(f'Initialized SSE client from event source {event_source}')\n self._event_source = event_source\n self._char_enc = char_enc\n\n def _read(self):\n \"\"\"\n 读取传入的事件源流并生成事件块。\n 不幸的是,有些服务器可能会决定在响应中将事件分解为多个HTTP块。\n 因此,有必要正确地将连续的响应块缝合在一起,并找到SSE分隔符(空的新行),以生成完整、正确的事件块。\n \"\"\"\n data = b''\n for chunk in self._event_source:\n for line in chunk.splitlines(True):\n data += line\n if data.endswith((b'\\r\\r', b'\\n\\n', b'\\r\\n\\r\\n')):\n yield data\n data = b''\n if data:\n yield data\n\n def events(self):\n \"\"\"\n 从给定的输入流中读取 Server-Side-Event (SSE) 数据,并生成解析后的 Event 对象。\n \n Args:\n 无\n \n Returns:\n generator: 解析后的 Event 对象的生成器。\n \"\"\"\n for chunk in self._read():\n event = Event()\n # Split before decoding so splitlines() only uses \\r and \\n\n for line in chunk.splitlines():\n # Decode the line.\n line = line.decode(self._char_enc)\n # Lines starting with a separator are comments and are to be\n # ignored.\n if not line.strip() or line.startswith(':'):\n continue\n logging.debug(f\"raw line: {line}\")\n data = line.split(':', 1)\n field = data[0]\n # Ignore unknown fields.\n if field not in event.__dict__:\n logging.info(f'Saw invalid field {field} while parsing Server Side Event')\n continue\n\n if len(data) > 1:\n # From the spec:\n # \"If value starts with a single U+0020 SPACE character,\n # remove it from value.\"\n if data[1].startswith(' '):\n value = data[1][1:]\n else:\n value = data[1]\n else:\n # If no value is present after the separator,\n # assume an empty value.\n value = ''\n # The data field may come over multiple lines and their values\n # are concatenated with each other.\n if field == 'data':\n event.__dict__[field] += value + '\\n'\n else:\n event.__dict__[field] = value\n # Events with no data are not dispatched.\n if not event.data:\n continue\n # If the data field ends with a newline, remove it.\n if event.data.endswith('\\n'):\n event.data = event.data[0:-1]\n # Empty event names default to 'message'\n event.event = event.event or 'message'\n # Dispatch the event\n logging.info(f'Dispatching {event}...')\n yield event\n\n def close(self):\n \"\"\"\n 手动关闭事件源流。\n \"\"\"\n self._event_source.close()"
},
{
"identifier": "AppBuilderServerException",
"path": "appbuilder/core/_exception.py",
"snippet": "class AppBuilderServerException(BaseRPCException):\n r\"\"\"AppBuilderServerException represent backend server failed response.\n \"\"\"\n\n def __init__(self, request_id=\"\", code=\"\", message=\"\", service_err_code=\"\", service_err_message=\"\"):\n r\"\"\"__init__ a AppBuilderServerException instance.\n :param request_id: str, request unique id.\n :param code: str, backend .\n :rtype:\n \"\"\"\n super().__init__(\"request_id={}, code={}, message={}, service_err_code={}, service_err_message={} \".format(\n request_id, code, message, service_err_code, service_err_message))"
},
{
"identifier": "ModelNotSupportedException",
"path": "appbuilder/core/_exception.py",
"snippet": "class ModelNotSupportedException(BaseRPCException):\n r\"\"\"ModelNotSupportedException represent model is not supported\n \"\"\"\n pass"
}
] | import itertools
import json
import uuid
import requests
from enum import Enum
from appbuilder.core.constants import GATEWAY_URL, GATEWAY_INNER_URL
from pydantic import BaseModel, Field, ValidationError, HttpUrl, validator
from pydantic.types import confloat
from appbuilder.core.component import Component
from appbuilder.core.message import Message, _T
from appbuilder.utils.logger_util import logger
from typing import Dict, List, Optional, Any
from appbuilder.core.component import ComponentArguments
from appbuilder.core._exception import AppBuilderServerException
from appbuilder.core.utils import ModelInfo
from appbuilder.utils.sse_util import SSEClient
from appbuilder.core._exception import AppBuilderServerException, ModelNotSupportedException
from collections.abc import Generator | 4,620 |
if "code" in data and "message" in data and "requestId" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
if "code" in data and "message" in data and "status" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
self.result = data.get("answer", None)
trace_log_list = data.get("trace_log", None)
if trace_log_list is not None:
for trace_log in trace_log_list:
key = trace_log["tool"]
result_list = trace_log["result"]
self.extra[key] = result_list
def parse_stream_data(self, parsed_str):
"""解析流式数据块并提取answer字段"""
try:
data = json.loads(parsed_str)
if "code" in data and "message" in data and "requestId" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
if "code" in data and "message" in data and "status" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
return data
except json.JSONDecodeError:
# 处理可能的解析错误
print("error: " + parsed_str)
raise AppBuilderServerException("unknown", "unknown", parsed_str)
def get_stream_data(self):
"""获取处理过的流式数据的迭代器"""
return self.result
def to_message(self):
"""将响应结果转换为Message对象。
Returns:
Message: Message对象。
"""
message = LLMMessage()
message.id = self.log_id
message.content = self.result
message.extra = self.extra
return self.message_iterable_wrapper(message)
def message_iterable_wrapper(self, message):
"""
对模型输出的 Message 对象进行包装。
当 Message 是流式数据时,数据被迭代完后,将重新更新 content 为 blocking 的字符串。
"""
class IterableWrapper:
def __init__(self, stream_content):
self._content = stream_content
self._concat = ""
self._extra = {}
def __iter__(self):
return self
def __next__(self):
try:
result_json = next(self._content)
char = result_json.get("answer", "")
result_list = result_json.get("result")
key = result_json.get("tool")
if result_list is not None:
self._extra[key] = result_list
message.extra = self._extra # Update the original extra
self._concat += char
return char
except StopIteration:
message.content = self._concat # Update the original content
raise
if isinstance(message.content, Generator):
# Replace the original content with the custom iterable
message.content = IterableWrapper(message.content)
return message
class CompletionBaseComponent(Component):
name: str
version: str
base_url: str = "/rpc/2.0/cloud_hub/v1/ai_engine/copilot_engine"
model_name: str = ""
model_url: str = ""
model_type: str = "chat"
excluded_models: List[str] = ["Yi-34B-Chat", "ChatLaw"]
model_info: ModelInfo = None
model_config: Dict[str, Any] = {
"model": {
"provider": "baidu",
"name": "ERNIE-Bot",
"completion_params": {
"temperature": 1e-10,
"top_p": 0,
"presence_penalty": 0,
"frequency_penalty": 0
}
}
}
def __init__(self, meta: ComponentArguments, model=None, secret_key: Optional[str] = None,
gateway: str = ""):
"""
Args:
meta (ComponentArguments): 组件参数信息
model (str, optional): 模型名称. Defaults to None.
secret_key (Optional[str], optional): 可选的密钥. Defaults to None.
gateway (str, optional): 网关地址. Defaults to "".
"""
super().__init__(meta=meta, secret_key=secret_key, gateway=gateway)
if model and model in self.excluded_models:
| # Copyright (c) 2023 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LLMMessage(Message):
content: Optional[_T] = {}
extra: Optional[Dict] = {}
def __str__(self):
return f"Message(name={self.name}, content={self.content}, mtype={self.mtype}, extra={self.extra})"
class CompletionRequest(object):
r"""ShortSpeechRecognitionRequest."""
params = None
response_mode = "blocking"
def __init__(self, params: Dict[str, Any] = None, response_mode: str = None, **kwargs):
r""" __init__ the client state.
"""
self.params = params
self.response_mode = response_mode
class ModelArgsConfig(BaseModel):
stream: bool = Field(default=False, description="是否流式响应。默认为 False。")
temperature: confloat(gt=0.0, le=1.0) = Field(default=1e-10, description="模型的温度参数,范围从 0.0 到 1.0。")
top_p: confloat(gt=0.0, le=1.0) = Field(default=1e-10, description="模型的top_p参数,范围从 0.0 到 1.0。")
class CompletionResponse(object):
r"""ShortSpeechRecognitionResponse."""
error_no = 0
error_msg = ""
result = None
log_id = ""
extra = None
def __init__(self, response, stream: bool = False):
"""初始化客户端状态。"""
self.error_no = 0
self.error_msg = ""
self.log_id = response.headers.get("X-Appbuilder-Request-Id", None)
self.extra = {}
if stream:
# 流式数据处理
def stream_data():
sse_client = SSEClient(response)
for event in sse_client.events():
if not event:
continue
answer = self.parse_stream_data(event.data)
if answer is not None:
yield answer
self.result = stream_data()
else:
# 非流式数据的处理
if response.status_code != 200:
self.error_no = response.status_code
self.error_msg = "error"
self.result = response.text
raise AppBuilderServerException(self.log_id, self.error_no, self.result)
else:
data = response.json()
if "code" in data and "message" in data and "requestId" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
if "code" in data and "message" in data and "status" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
self.result = data.get("answer", None)
trace_log_list = data.get("trace_log", None)
if trace_log_list is not None:
for trace_log in trace_log_list:
key = trace_log["tool"]
result_list = trace_log["result"]
self.extra[key] = result_list
def parse_stream_data(self, parsed_str):
"""解析流式数据块并提取answer字段"""
try:
data = json.loads(parsed_str)
if "code" in data and "message" in data and "requestId" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
if "code" in data and "message" in data and "status" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
return data
except json.JSONDecodeError:
# 处理可能的解析错误
print("error: " + parsed_str)
raise AppBuilderServerException("unknown", "unknown", parsed_str)
def get_stream_data(self):
"""获取处理过的流式数据的迭代器"""
return self.result
def to_message(self):
"""将响应结果转换为Message对象。
Returns:
Message: Message对象。
"""
message = LLMMessage()
message.id = self.log_id
message.content = self.result
message.extra = self.extra
return self.message_iterable_wrapper(message)
def message_iterable_wrapper(self, message):
"""
对模型输出的 Message 对象进行包装。
当 Message 是流式数据时,数据被迭代完后,将重新更新 content 为 blocking 的字符串。
"""
class IterableWrapper:
def __init__(self, stream_content):
self._content = stream_content
self._concat = ""
self._extra = {}
def __iter__(self):
return self
def __next__(self):
try:
result_json = next(self._content)
char = result_json.get("answer", "")
result_list = result_json.get("result")
key = result_json.get("tool")
if result_list is not None:
self._extra[key] = result_list
message.extra = self._extra # Update the original extra
self._concat += char
return char
except StopIteration:
message.content = self._concat # Update the original content
raise
if isinstance(message.content, Generator):
# Replace the original content with the custom iterable
message.content = IterableWrapper(message.content)
return message
class CompletionBaseComponent(Component):
name: str
version: str
base_url: str = "/rpc/2.0/cloud_hub/v1/ai_engine/copilot_engine"
model_name: str = ""
model_url: str = ""
model_type: str = "chat"
excluded_models: List[str] = ["Yi-34B-Chat", "ChatLaw"]
model_info: ModelInfo = None
model_config: Dict[str, Any] = {
"model": {
"provider": "baidu",
"name": "ERNIE-Bot",
"completion_params": {
"temperature": 1e-10,
"top_p": 0,
"presence_penalty": 0,
"frequency_penalty": 0
}
}
}
def __init__(self, meta: ComponentArguments, model=None, secret_key: Optional[str] = None,
gateway: str = ""):
"""
Args:
meta (ComponentArguments): 组件参数信息
model (str, optional): 模型名称. Defaults to None.
secret_key (Optional[str], optional): 可选的密钥. Defaults to None.
gateway (str, optional): 网关地址. Defaults to "".
"""
super().__init__(meta=meta, secret_key=secret_key, gateway=gateway)
if model and model in self.excluded_models: | raise ModelNotSupportedException(f"Model {model} not supported") | 11 | 2023-12-05 01:48:12+00:00 | 8k |
corfyi/UCMCTrack | util/run_ucmc.py | [
{
"identifier": "Detector",
"path": "detector/detector.py",
"snippet": "class Detector:\n def __init__(self):\n self.seq_length = 0\n self.gmc = None\n\n def load(self,cam_para_file, det_file, gmc_file = None):\n self.mapper = Mapper(cam_para_file,\"MOT17\")\n self.load_detfile(det_file)\n\n if gmc_file is not None:\n self.gmc = GMCLoader(gmc_file)\n\n def load_detfile(self, filename):\n \n self.dets = dict()\n # 打开文本文件filename\n with open(filename, 'r') as f:\n # 读取文件中的每一行\n for line in f.readlines():\n # 将每一行的内容按照空格分开\n line = line.strip().split(',')\n frame_id = int(line[0])\n if frame_id > self.seq_length:\n self.seq_length = frame_id\n det_id = int(line[1])\n # 新建一个Detection对象\n det = Detection(det_id)\n det.bb_left = float(line[2])\n det.bb_top = float(line[3])\n det.bb_width = float(line[4])\n det.bb_height = float(line[5])\n det.conf = float(line[6])\n det.det_class = int(line[7])\n if det.det_class == -1:\n det.det_class = 0\n \n det.y,det.R = self.mapper.mapto([det.bb_left,det.bb_top,det.bb_width,det.bb_height])\n\n # 将det添加到字典中\n if frame_id not in self.dets:\n self.dets[frame_id] = []\n self.dets[frame_id].append(det)\n\n def get_dets(self, frame_id,conf_thresh = 0,det_class = 0):\n dets = self.dets[frame_id]\n dets = [det for det in dets if det.det_class == det_class and det.conf >= conf_thresh]\n return dets\n \n\n\n def cmc(self,x,y,w,h,frame_id):\n u,v = self.mapper.xy2uv(x,y)\n affine = self.gmc.get_affine(frame_id)\n M = affine[:,:2]\n T = np.zeros((2,1))\n T[0,0] = affine[0,2]\n T[1,0] = affine[1,2]\n\n p_center = np.array([[u],[v-h/2]])\n p_wh = np.array([[w],[h]])\n p_center = np.dot(M,p_center) + T\n p_wh = np.dot(M,p_wh)\n\n u = p_center[0,0]\n v = p_center[1,0]+p_wh[1,0]/2\n\n xy,_ = self.mapper.uv2xy(np.array([[u],[v]]),np.eye(2))\n\n return xy[0,0],xy[1,0]"
},
{
"identifier": "Detection",
"path": "detector/detector.py",
"snippet": "class Detection:\n\n def __init__(self, id, bb_left = 0, bb_top = 0, bb_width = 0, bb_height = 0, conf = 0, det_class = 0):\n self.id = id\n self.bb_left = bb_left\n self.bb_top = bb_top\n self.bb_width = bb_width\n self.bb_height = bb_height\n self.conf = conf\n self.det_class = det_class\n self.track_id = 0\n self.y = np.zeros((2, 1))\n self.R = np.eye(4)\n\n def get_box(self):\n return [self.bb_left, self.bb_top, self.bb_width, self.bb_height]\n\n\n def __str__(self):\n return 'd{}, bb_box:[{},{},{},{}], conf={:.2f}, class{}, uv:[{:.0f},{:.0f}], mapped to:[{:.1f},{:.1f}]'.format(\n self.id, self.bb_left, self.bb_top, self.bb_width, self.bb_height, self.conf, self.det_class,\n self.bb_left+self.bb_width/2,self.bb_top+self.bb_height,self.y[0,0],self.y[1,0])\n\n def __repr__(self):\n return self.__str__()"
},
{
"identifier": "UCMCTrack",
"path": "tracker/ucmc.py",
"snippet": "class UCMCTrack(object):\n def __init__(self,a1,a2,wx, wy,vmax, max_age, fps, dataset, high_score, use_cmc,detector = None):\n self.wx = wx\n self.wy = wy\n self.vmax = vmax\n self.dataset = dataset\n self.high_score = high_score\n self.max_age = max_age\n self.a1 = a1\n self.a2 = a2\n self.dt = 1.0/fps\n\n self.use_cmc = use_cmc\n\n self.trackers = []\n self.confirmed_idx = []\n self.coasted_idx = []\n self.tentative_idx = []\n\n self.detector = detector\n\n\n def update(self, dets,frame_id):\n \n self.data_association(dets,frame_id)\n \n self.associate_tentative(dets)\n \n self.initial_tentative(dets)\n \n self.delete_old_trackers()\n \n self.update_status(dets)\n \n def data_association(self, dets,frame_id):\n # Separate detections into high score and low score\n detidx_high = []\n detidx_low = []\n for i in range(len(dets)):\n if dets[i].conf >= self.high_score:\n detidx_high.append(i)\n else:\n detidx_low.append(i)\n\n # Predcit new locations of tracks\n for track in self.trackers:\n track.predict()\n if self.use_cmc:\n x,y = self.detector.cmc(track.kf.x[0,0],track.kf.x[2,0],track.w,track.h,frame_id)\n track.kf.x[0,0] = x\n track.kf.x[2,0] = y\n \n trackidx_remain = []\n self.detidx_remain = []\n\n # Associate high score detections with tracks\n trackidx = self.confirmed_idx + self.coasted_idx\n num_det = len(detidx_high)\n num_trk = len(trackidx)\n\n for trk in self.trackers:\n trk.detidx = -1\n\n if num_det*num_trk > 0:\n cost_matrix = np.zeros((num_det, num_trk))\n for i in range(num_det):\n det_idx = detidx_high[i]\n for j in range(num_trk):\n trk_idx = trackidx[j]\n cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)\n \n matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix, self.a1)\n \n for i in unmatched_a:\n self.detidx_remain.append(detidx_high[i])\n for i in unmatched_b:\n trackidx_remain.append(trackidx[i])\n \n for i,j in matched_indices:\n det_idx = detidx_high[i]\n trk_idx = trackidx[j]\n self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)\n self.trackers[trk_idx].death_count = 0\n self.trackers[trk_idx].detidx = det_idx\n self.trackers[trk_idx].status = TrackStatus.Confirmed\n dets[det_idx].track_id = self.trackers[trk_idx].id\n\n else:\n self.detidx_remain = detidx_high\n trackidx_remain = trackidx\n\n \n # Associate low score detections with remain tracks\n num_det = len(detidx_low)\n num_trk = len(trackidx_remain)\n if num_det*num_trk > 0:\n cost_matrix = np.zeros((num_det, num_trk))\n for i in range(num_det):\n det_idx = detidx_low[i]\n for j in range(num_trk):\n trk_idx = trackidx_remain[j]\n cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)\n \n matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix,self.a2)\n \n\n for i in unmatched_b:\n trk_idx = trackidx_remain[i]\n self.trackers[trk_idx].status = TrackStatus.Coasted\n # self.trackers[trk_idx].death_count += 1\n self.trackers[trk_idx].detidx = -1\n\n for i,j in matched_indices:\n det_idx = detidx_low[i]\n trk_idx = trackidx_remain[j]\n self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)\n self.trackers[trk_idx].death_count = 0\n self.trackers[trk_idx].detidx = det_idx\n self.trackers[trk_idx].status = TrackStatus.Confirmed\n dets[det_idx].track_id = self.trackers[trk_idx].id\n\n\n def associate_tentative(self, dets):\n num_det = len(self.detidx_remain)\n num_trk = len(self.tentative_idx)\n\n cost_matrix = np.zeros((num_det, num_trk))\n for i in range(num_det):\n det_idx = 
self.detidx_remain[i]\n for j in range(num_trk):\n trk_idx = self.tentative_idx[j]\n cost_matrix[i,j] = self.trackers[trk_idx].distance(dets[det_idx].y, dets[det_idx].R)\n \n matched_indices,unmatched_a,unmatched_b = linear_assignment(cost_matrix,self.a1)\n\n for i,j in matched_indices:\n det_idx = self.detidx_remain[i]\n trk_idx = self.tentative_idx[j]\n self.trackers[trk_idx].update(dets[det_idx].y, dets[det_idx].R)\n self.trackers[trk_idx].death_count = 0\n self.trackers[trk_idx].birth_count += 1\n self.trackers[trk_idx].detidx = det_idx\n dets[det_idx].track_id = self.trackers[trk_idx].id\n if self.trackers[trk_idx].birth_count >= 2:\n self.trackers[trk_idx].birth_count = 0\n self.trackers[trk_idx].status = TrackStatus.Confirmed\n\n for i in unmatched_b:\n trk_idx = self.tentative_idx[i]\n # self.trackers[trk_idx].death_count += 1\n self.trackers[trk_idx].detidx = -1\n\n \n unmatched_detidx = []\n for i in unmatched_a:\n unmatched_detidx.append(self.detidx_remain[i])\n self.detidx_remain = unmatched_detidx\n\n \n \n def initial_tentative(self,dets):\n for i in self.detidx_remain: \n self.trackers.append(KalmanTracker(dets[i].y,dets[i].R,self.wx,self.wy,self.vmax, dets[i].bb_width,dets[i].bb_height,self.dt))\n self.trackers[-1].status = TrackStatus.Tentative\n self.trackers[-1].detidx = i\n self.detidx_remain = []\n\n def delete_old_trackers(self):\n i = len(self.trackers)\n for trk in reversed(self.trackers):\n trk.death_count += 1\n i -= 1 \n if ( trk.status == TrackStatus.Coasted and trk.death_count >= self.max_age) or ( trk.status == TrackStatus.Tentative and trk.death_count >= 2):\n self.trackers.pop(i)\n\n def update_status(self,dets):\n self.confirmed_idx = []\n self.coasted_idx = []\n self.tentative_idx = []\n for i in range(len(self.trackers)):\n\n detidx = self.trackers[i].detidx\n if detidx >= 0 and detidx < len(dets):\n self.trackers[i].h = dets[detidx].bb_height\n self.trackers[i].w = dets[detidx].bb_width\n\n if self.trackers[i].status == TrackStatus.Confirmed:\n self.confirmed_idx.append(i)\n elif self.trackers[i].status == TrackStatus.Coasted:\n self.coasted_idx.append(i)\n elif self.trackers[i].status == TrackStatus.Tentative:\n self.tentative_idx.append(i)"
},
{
"identifier": "TrackStatus",
"path": "tracker/kalman.py",
"snippet": "class TrackStatus(Enum):\n Tentative = 0\n Confirmed = 1\n Coasted = 2"
},
{
"identifier": "interpolate",
"path": "eval/interpolation.py",
"snippet": "def interpolate(txt_path, save_path, n_min=3, n_dti=20, is_enable = True):\n mkdir_if_missing(txt_path)\n mkdir_if_missing(save_path)\n if is_enable:\n dti(txt_path, save_path, n_min, n_dti)\n else:\n #拷贝txt_path下的文件到save_path\n for file in os.listdir(txt_path):\n if file.endswith(\".txt\"):\n shutil.copy(os.path.join(txt_path,file),os.path.join(save_path,file))"
}
] | from detector.detector import Detector, Detection
from tracker.ucmc import UCMCTrack
from tracker.kalman import TrackStatus
from eval.interpolation import interpolate
import os,time
import argparse | 4,011 |
class Tracklet():
def __init__(self,frame_id,box):
self.is_active = False
self.boxes = dict()
self.boxes[frame_id] = box
def add_box(self, frame_id, box):
self.boxes[frame_id] = box
def activate(self):
self.is_active = True
def make_args():
parser = argparse.ArgumentParser(description='Process some arguments.')
parser.add_argument('--seq', type=str, default = "MOT17-02", help='seq name')
parser.add_argument('--fps', type=float, default=30.0, help='fps')
parser.add_argument('--wx', type=float, default=0.1, help='wx')
parser.add_argument('--wy', type=float, default=0.1, help='wy')
parser.add_argument('--vmax', type=float, default=0.5, help='vmax')
parser.add_argument('--a', type=float, default=10.0, help='assignment threshold')
parser.add_argument('--cdt', type=float, default=30.0, help='coasted deletion time')
parser.add_argument('--high_score', type=float, default=0.6, help='high score threshold')
parser.add_argument('--conf_thresh', type=float, default=0.5, help='detection confidence threshold')
parser.add_argument("--cmc", action="store_true", help="use cmc or not.")
parser.add_argument("--hp", action="store_true", help="use head padding or not.")
args = parser.parse_args()
return args
def run_ucmc(args, det_path = "det_results/mot17/yolox_x_ablation",
cam_path = "cam_para/mot17",
gmc_path = "gmc/mot17",
out_path = "output/mot17",
exp_name = "val",
dataset = "MOT17"):
seq_name = args.seq
eval_path = os.path.join(out_path,exp_name)
orig_save_path = os.path.join(eval_path,seq_name)
if not os.path.exists(orig_save_path):
os.makedirs(orig_save_path)
if dataset == "MOT17":
det_file = os.path.join(det_path, f"{seq_name}-SDP.txt")
cam_para = os.path.join(cam_path, f"{seq_name}-SDP.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}-SDP.txt")
elif dataset == "MOT20":
det_file = os.path.join(det_path, f"{seq_name}.txt")
cam_para = os.path.join(cam_path, f"{seq_name}.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}.txt")
gmc_file = os.path.join(gmc_path, f"GMC-{seq_name}.txt")
print(det_file)
print(cam_para)
detector = Detector()
detector.load(cam_para, det_file,gmc_file)
print(f"seq_length = {detector.seq_length}")
a1 = args.a
a2 = args.a
high_score = args.high_score
conf_thresh = args.conf_thresh
fps = args.fps
cdt = args.cdt
wx = args.wx
wy = args.wy
vmax = args.vmax
|
class Tracklet():
def __init__(self,frame_id,box):
self.is_active = False
self.boxes = dict()
self.boxes[frame_id] = box
def add_box(self, frame_id, box):
self.boxes[frame_id] = box
def activate(self):
self.is_active = True
def make_args():
parser = argparse.ArgumentParser(description='Process some arguments.')
parser.add_argument('--seq', type=str, default = "MOT17-02", help='seq name')
parser.add_argument('--fps', type=float, default=30.0, help='fps')
parser.add_argument('--wx', type=float, default=0.1, help='wx')
parser.add_argument('--wy', type=float, default=0.1, help='wy')
parser.add_argument('--vmax', type=float, default=0.5, help='vmax')
parser.add_argument('--a', type=float, default=10.0, help='assignment threshold')
parser.add_argument('--cdt', type=float, default=30.0, help='coasted deletion time')
parser.add_argument('--high_score', type=float, default=0.6, help='high score threshold')
parser.add_argument('--conf_thresh', type=float, default=0.5, help='detection confidence threshold')
parser.add_argument("--cmc", action="store_true", help="use cmc or not.")
parser.add_argument("--hp", action="store_true", help="use head padding or not.")
args = parser.parse_args()
return args
def run_ucmc(args, det_path = "det_results/mot17/yolox_x_ablation",
cam_path = "cam_para/mot17",
gmc_path = "gmc/mot17",
out_path = "output/mot17",
exp_name = "val",
dataset = "MOT17"):
seq_name = args.seq
eval_path = os.path.join(out_path,exp_name)
orig_save_path = os.path.join(eval_path,seq_name)
if not os.path.exists(orig_save_path):
os.makedirs(orig_save_path)
if dataset == "MOT17":
det_file = os.path.join(det_path, f"{seq_name}-SDP.txt")
cam_para = os.path.join(cam_path, f"{seq_name}-SDP.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}-SDP.txt")
elif dataset == "MOT20":
det_file = os.path.join(det_path, f"{seq_name}.txt")
cam_para = os.path.join(cam_path, f"{seq_name}.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}.txt")
gmc_file = os.path.join(gmc_path, f"GMC-{seq_name}.txt")
print(det_file)
print(cam_para)
detector = Detector()
detector.load(cam_para, det_file,gmc_file)
print(f"seq_length = {detector.seq_length}")
a1 = args.a
a2 = args.a
high_score = args.high_score
conf_thresh = args.conf_thresh
fps = args.fps
cdt = args.cdt
wx = args.wx
wy = args.wy
vmax = args.vmax
| tracker = UCMCTrack(a1, a2, wx,wy,vmax, cdt, fps, dataset, high_score,args.cmc,detector) | 2 | 2023-12-12 07:29:20+00:00 | 8k |